remove gstffmpegdecall. This was a temporary hack to get around some issues with our video/avi-centered mimetype syst...

Original commit message from CVS:
* remove gstffmpegdecall. This was a temporary hack to get around
some issues with our video/avi-centered mimetype system. Now that
we use proper identifiers, we don't need that piece of cruft anymore.
* update ffmpeg codec map for ffdec_*, plus make the uncompressed stream
caps also be formed by the codec mapper (this is easier to handle imo).
* add audio support (untested, though :P).
* remove the copying of buffer content and add a get_buffer() handler
instead.
* add an int to ffmpegenc to make it compile and resolve the symbol. I'll
truly fix that later on.
* make mimetypes in the ffmpeg codec mapper conform to docs/random/mimetypes
* etc.

Oh, and let's not forget that it can play back DIVX5 et al. nicely. Support
for SVQ1/3 is also there, so if someone completes qtdemux, we can watch
these, too.
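
As background on the get_buffer() item above: instead of letting libavcodec allocate its own frame storage and then memcpy'ing every decoded picture into a GstBuffer, the decoder now hands ffmpeg a GstBuffer to decode into directly and keeps a handle to it for release_buffer(). Below is only a simplified sketch of that idea, with hypothetical sketch_* names; the committed gst_ffmpegdec_get_buffer() further down sets up the planes, edge padding and alignment by hand rather than through avpicture_fill(), and assumes the same <gst/gst.h> and <ffmpeg/avcodec.h> includes as gstffmpegdec.c.

/* wired up where the sink pad caps are negotiated:
 *   context->get_buffer     = sketch_get_buffer;
 *   context->release_buffer = sketch_release_buffer;
 */
static int
sketch_get_buffer (AVCodecContext *context, AVFrame *picture)
{
  gint size = avpicture_get_size (context->pix_fmt,
                                  context->width, context->height);
  GstBuffer *buf = gst_buffer_new_and_alloc (size);

  /* point the frame planes straight into the GstBuffer's memory */
  avpicture_fill ((AVPicture *) picture, GST_BUFFER_DATA (buf),
                  context->pix_fmt, context->width, context->height);

  /* tell ffmpeg we own this memory and stash the GstBuffer so that
   * release_buffer() (and the chain function) can find it again */
  picture->type = FF_BUFFER_TYPE_USER;
  picture->base[0] = (int8_t *) buf;
  gst_buffer_ref (buf);   /* one ref for ffmpeg, one gets pushed downstream */

  return 0;
}

static void
sketch_release_buffer (AVCodecContext *context, AVFrame *picture)
{
  GstBuffer *buf = GST_BUFFER (picture->base[0]);

  gst_buffer_unref (buf);
  picture->base[0] = NULL;
}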
Ronald S. Bultje 2003-06-07 00:41:32 +00:00
parent 56ab463205
commit 80924bdfdc
7 changed files with 840 additions and 1607 deletions

View file

@@ -1,6 +1,6 @@
plugindir = $(libdir)/gstreamer-@GST_MAJORMINOR@
plugin_LTLIBRARIES = libgstffmpeg.la libgstffmpegall.la
plugin_LTLIBRARIES = libgstffmpeg.la
libgstffmpeg_la_SOURCES = gstffmpeg.c \
gstffmpegcodecmap.c \
@@ -20,15 +20,4 @@ libgstffmpeg_la_LIBADD = \
libgstffmpeg_la_LDFLAGS = $(GST_PLUGIN_LDFLAGS)
libgstffmpegall_la_SOURCES = gstffmpegall.c \
gstffmpegallcodecmap.c
libgstffmpegall_la_CFLAGS = $(GST_CFLAGS) \
-I $(top_builddir)/gst-libs/ext/ffmpeg/ffmpeg/libavcodec \
-I $(top_builddir)/gst-libs/ext/ffmpeg/ffmpeg/libavformat
libgstffmpegall_la_LIBADD = \
$(top_builddir)/gst-libs/ext/ffmpeg/ffmpeg/libavcodec/libavcodec.a
libgstffmpegall_la_LDFLAGS = $(GST_PLUGIN_LDFLAGS)
noinst_HEADERS = gstffmpegallcodecmap.h
noinst_HEADERS = gstffmpegcodecmap.h

View file

@@ -1,437 +0,0 @@
/* GStreamer
* Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#include "config.h"
#include <assert.h>
#include <string.h>
#ifdef HAVE_FFMPEG_UNINSTALLED
#include <avcodec.h>
#else
#include <ffmpeg/avcodec.h>
#endif
#include <gst/gst.h>
#include "gstffmpegallcodecmap.h"
typedef struct _GstFFMpegDecAll {
GstElement element;
GstPad *srcpad,
*sinkpad;
AVCodecContext *context;
AVFrame picture;
gboolean opened;
} GstFFMpegDecAll;
typedef struct _GstFFMpegDecAllClass {
GstElementClass parent_class;
} GstFFMpegDecAllClass;
#define GST_TYPE_FFMPEGDECALL \
(gst_ffmpegdecall_get_type())
#define GST_FFMPEGDECALL(obj) \
(G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_FFMPEGDECALL,GstFFMpegDecAll))
#define GST_FFMPEGDECALL_CLASS(klass) \
(G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_FFMPEGDECALL,GstFFMpegDecClassAll))
#define GST_IS_FFMPEGDECALL(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_FFMPEGDECALL))
#define GST_IS_FFMPEGDECALL_CLASS(obj) \
(G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_FFMPEGDECALL))
GST_PAD_TEMPLATE_FACTORY(src_templ,
"src",
GST_PAD_SRC,
GST_PAD_ALWAYS,
GST_CAPS_NEW (
"gstffmpeg_src_videoyuv",
"video/raw",
"format", GST_PROPS_LIST (
GST_PROPS_FOURCC (GST_MAKE_FOURCC('Y','U','Y','2')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('I','4','2','0')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('Y','4','1','P'))
),
"width", GST_PROPS_INT_RANGE (16, 4096),
"height", GST_PROPS_INT_RANGE (16, 4096)
),
GST_CAPS_NEW (
"gstffmpeg_src_videorgb",
"video/raw",
"format", GST_PROPS_FOURCC (GST_MAKE_FOURCC('R','G','B',' ')),
"width", GST_PROPS_INT_RANGE (16, 4096),
"height", GST_PROPS_INT_RANGE (16, 4096),
"bpp", GST_PROPS_INT_RANGE (16, 32),
"depth", GST_PROPS_INT_RANGE (15, 32),
"endianness", GST_PROPS_INT (G_BYTE_ORDER)
) /*,
GST_CAPS_NEW (
"avidemux_src_audio",
"audio/raw",
"format", GST_PROPS_STRING ("int"),
"law", GST_PROPS_INT (0),
"endianness", GST_PROPS_INT (G_BYTE_ORDER),
"signed", GST_PROPS_LIST (
GST_PROPS_BOOLEAN (TRUE),
GST_PROPS_BOOLEAN (FALSE)
),
"width", GST_PROPS_LIST (
GST_PROPS_INT (8),
GST_PROPS_INT (16)
),
"depth", GST_PROPS_LIST (
GST_PROPS_INT (8),
GST_PROPS_INT (16)
),
"rate", GST_PROPS_INT_RANGE (11025, 96000),
"channels", GST_PROPS_INT_RANGE (1, 2)
) */
)
GST_PAD_TEMPLATE_FACTORY(sink_templ,
"sink",
GST_PAD_SINK,
GST_PAD_ALWAYS,
GST_CAPS_NEW (
"gstffmpeg_sink_avivideo",
"video/avi",
"format", GST_PROPS_STRING("strf_vids"),
/*"compression", GST_PROPS_LIST (
GST_PROPS_FOURCC (GST_MAKE_FOURCC('M','J','P','G')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('J','P','E','G')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('V','I','X','L')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('P','I','X','L')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('H','F','Y','U')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('V','I','X','L')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('D','V','S','D')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('d','v','s','d')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('M','P','E','G')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('M','P','G','I')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('H','2','6','3')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('i','2','6','3')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('L','2','6','3')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('M','2','6','3')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('V','D','O','W')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('V','I','V','O')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('x','2','6','3')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('D','I','V','X')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('d','i','v','x')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('D','I','V','3')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('D','I','V','4')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('D','I','V','5')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('D','X','5','o')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('X','V','I','D')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('x','v','i','d')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('M','P','G','4')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('M','P','4','2')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('M','P','4','3')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('W','M','V','1')),
GST_PROPS_FOURCC (GST_MAKE_FOURCC('W','M','V','2'))
),*/
"width", GST_PROPS_INT_RANGE (16, 4096),
"height", GST_PROPS_INT_RANGE (16, 4096)
),
GST_CAPS_NEW (
"gstffmpeg_sink_dv",
"video/dv",
"format", GST_PROPS_LIST (
GST_PROPS_STRING ("NTSC"),
GST_PROPS_STRING ("PAL")
),
"width", GST_PROPS_INT_RANGE (16, 4096),
"height", GST_PROPS_INT_RANGE (16, 4096)
),
GST_CAPS_NEW (
"gstffmpeg_sink_h263",
"video/H263",
"width", GST_PROPS_INT_RANGE (16, 4096),
"height", GST_PROPS_INT_RANGE (16, 4096)
),
GST_CAPS_NEW (
"gstffmpeg_sink_mpeg",
"video/mpeg",
"systemstream", GST_PROPS_BOOLEAN(FALSE),
"mpegversion", GST_PROPS_INT(1) /*,
"width", GST_PROPS_INT_RANGE (16, 4096),
"height", GST_PROPS_INT_RANGE (16, 4096)*/
),
GST_CAPS_NEW (
"gstffmpeg_sink_jpeg",
"video/jpeg",
"width", GST_PROPS_INT_RANGE (16, 4096),
"height", GST_PROPS_INT_RANGE (16, 4096)
),
GST_CAPS_NEW (
"gstffmpeg_sink_wmv",
"video/wmv",
"width", GST_PROPS_INT_RANGE (16, 4096),
"height", GST_PROPS_INT_RANGE (16, 4096)
)
)
/* A number of function prototypes are given so we can refer to them later. */
static void gst_ffmpegdecall_class_init (GstFFMpegDecAllClass *klass);
static void gst_ffmpegdecall_init (GstFFMpegDecAll *ffmpegdec);
static void gst_ffmpegdecall_destroy (GObject *obj);
static void gst_ffmpegdecall_chain (GstPad *pad, GstBuffer *buffer);
static GstPadLinkReturn gst_ffmpegdecall_connect (GstPad *pad, GstCaps *caps);
static GstElementClass *parent_class = NULL;
/* elementfactory information */
GstElementDetails gst_ffmpegdecall_details = {
"FFMPEG codec wrapper",
"Codec/Audio-Video/FFMpeg",
"LGPL",
"FFMpeg-based video/audio decoder",
VERSION,
"Ronald Bultje <rbultje@ronald.bitfreak.net>",
"(C) 2002",
};
GType
gst_ffmpegdecall_get_type(void)
{
static GType ffmpegdecall_type = 0;
if (!ffmpegdecall_type)
{
static const GTypeInfo ffmpegdecall_info = {
sizeof(GstFFMpegDecAllClass),
NULL,
NULL,
(GClassInitFunc)gst_ffmpegdecall_class_init,
NULL,
NULL,
sizeof(GstFFMpegDecAll),
0,
(GInstanceInitFunc)gst_ffmpegdecall_init,
};
ffmpegdecall_type = g_type_register_static(GST_TYPE_ELEMENT,
"GstFFMpegDecAll",
&ffmpegdecall_info, 0);
}
return ffmpegdecall_type;
}
static void
gst_ffmpegdecall_class_init (GstFFMpegDecAllClass *klass)
{
GObjectClass *obj_class = (GObjectClass*) klass;
parent_class = g_type_class_ref(GST_TYPE_ELEMENT);
obj_class->dispose = gst_ffmpegdecall_destroy;
}
static void
gst_ffmpegdecall_init(GstFFMpegDecAll *ffmpegdec)
{
ffmpegdec->sinkpad = gst_pad_new_from_template(
GST_PAD_TEMPLATE_GET(sink_templ), "sink");
gst_pad_set_link_function(ffmpegdec->sinkpad,
gst_ffmpegdecall_connect);
gst_pad_set_chain_function(ffmpegdec->sinkpad,
gst_ffmpegdecall_chain);
ffmpegdec->srcpad = gst_pad_new_from_template(
GST_PAD_TEMPLATE_GET(src_templ), "src");
gst_element_add_pad(GST_ELEMENT(ffmpegdec),
ffmpegdec->sinkpad);
gst_element_add_pad(GST_ELEMENT(ffmpegdec),
ffmpegdec->srcpad);
ffmpegdec->context = avcodec_alloc_context();
ffmpegdec->opened = FALSE;
}
static void
gst_ffmpegdecall_destroy (GObject *obj)
{
GstFFMpegDecAll *ffmpegdec = GST_FFMPEGDECALL(obj);
if (ffmpegdec->opened) {
avcodec_close(ffmpegdec->context);
ffmpegdec->opened = FALSE;
}
av_free(ffmpegdec->context);
G_OBJECT_CLASS (parent_class)->dispose (obj);
}
static GstPadLinkReturn
gst_ffmpegdecall_connect (GstPad *pad, GstCaps *caps)
{
GstFFMpegDecAll *ffmpegdec = GST_FFMPEGDECALL(gst_pad_get_parent(pad));
enum CodecID id;
AVCodec *plugin;
if (!GST_CAPS_IS_FIXED(caps))
return GST_PAD_LINK_DELAYED;
avcodec_get_context_defaults(ffmpegdec->context);
if ((id = gst_ffmpeg_caps_to_codecid(caps, ffmpegdec->context)) == CODEC_ID_NONE) {
GST_DEBUG(GST_CAT_PLUGIN_INFO,
"Failed to find corresponding codecID");
return GST_PAD_LINK_REFUSED;
}
if ((plugin = avcodec_find_decoder(id)) == NULL) {
GST_DEBUG(GST_CAT_PLUGIN_INFO,
"Failed to find an avdecoder for id=%d", id);
return GST_PAD_LINK_REFUSED;
}
/* we dont send complete frames */
if (plugin->capabilities & CODEC_CAP_TRUNCATED)
ffmpegdec->context->flags |= CODEC_FLAG_TRUNCATED;
if (avcodec_open(ffmpegdec->context, plugin)) {
GST_DEBUG(GST_CAT_PLUGIN_INFO,
"Failed to open FFMPEG codec for id=%d", id);
ffmpegdec->opened = FALSE;
return GST_PAD_LINK_REFUSED;
}
ffmpegdec->opened = TRUE;
return GST_PAD_LINK_OK;
}
static void
gst_ffmpegdecall_chain (GstPad *pad, GstBuffer *inbuf)
{
GstBuffer *outbuf;
GstFFMpegDecAll *ffmpegdec = GST_FFMPEGDECALL(gst_pad_get_parent (pad));
guchar *data;
gint size, frame_size, len;
gint have_picture;
data = GST_BUFFER_DATA (inbuf);
size = GST_BUFFER_SIZE (inbuf);
do {
ffmpegdec->context->frame_number++;
len = avcodec_decode_video (ffmpegdec->context, &ffmpegdec->picture,
&have_picture, data, size);
if (len < 0) {
g_warning ("ffmpegdec: failed to decode frame");
break;
}
if (have_picture) {
guchar *picdata, *picdata2, *outdata, *outdata2;
gint xsize, i, width, height;
height = ffmpegdec->context->height;
width = ffmpegdec->context->width;
if (!GST_PAD_CAPS(ffmpegdec->srcpad)) {
GstCaps *newcaps = gst_ffmpeg_codecid_to_caps(CODEC_ID_RAWVIDEO,
ffmpegdec->context);
if (!newcaps) {
gst_element_error(GST_ELEMENT(ffmpegdec),
"Failed to create caps for ffmpeg (pix_fmt=%d)",
ffmpegdec->context->pix_fmt);
break;
}
if (gst_pad_try_set_caps(ffmpegdec->srcpad, newcaps) <= 0) {
gst_element_error(GST_ELEMENT(ffmpegdec),
"Failed to set caps on the other end");
break;
}
}
frame_size = width * height;
outbuf = gst_buffer_new ();
GST_BUFFER_SIZE (outbuf) = (frame_size*3)>>1;
outdata = GST_BUFFER_DATA (outbuf) = g_malloc (GST_BUFFER_SIZE (outbuf));
GST_BUFFER_TIMESTAMP (outbuf) = GST_BUFFER_TIMESTAMP (inbuf);
picdata = ffmpegdec->picture.data[0];
xsize = ffmpegdec->picture.linesize[0];
for (i=height; i; i--) {
memcpy (outdata, picdata, width);
outdata += width;
picdata += xsize;
}
frame_size >>= 2;
width >>= 1;
height >>= 1;
outdata2 = outdata + frame_size;
picdata = ffmpegdec->picture.data[1];
picdata2 = ffmpegdec->picture.data[2];
xsize = ffmpegdec->picture.linesize[1];
for (i=height; i; i--) {
memcpy (outdata, picdata, width);
memcpy (outdata2, picdata2, width);
outdata += width; outdata2 += width;
picdata += xsize; picdata2 += xsize;
}
gst_pad_push (ffmpegdec->srcpad, outbuf);
}
size -= len;
data += len;
} while (size > 0);
gst_buffer_unref (inbuf);
}
static gboolean
plugin_init (GModule *module, GstPlugin *plugin)
{
GstElementFactory *factory;
avcodec_init ();
avcodec_register_all ();
/* create an elementfactory for the element */
factory = gst_element_factory_new("ffmpegdecall",
GST_TYPE_FFMPEGDECALL,
&gst_ffmpegdecall_details);
g_return_val_if_fail(factory != NULL, FALSE);
gst_element_factory_set_rank(factory, GST_ELEMENT_RANK_MARGINAL);
gst_element_factory_add_pad_template(factory,
GST_PAD_TEMPLATE_GET(src_templ));
gst_element_factory_add_pad_template(factory,
GST_PAD_TEMPLATE_GET(sink_templ));
gst_plugin_add_feature(plugin, GST_PLUGIN_FEATURE(factory));
return TRUE;
}
GstPluginDesc plugin_desc = {
GST_VERSION_MAJOR,
GST_VERSION_MINOR,
"ffmpegdecall",
plugin_init
};

View file

@@ -1,718 +0,0 @@
/* GStreamer
* Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#include "config.h"
#ifdef HAVE_FFMPEG_UNINSTALLED
#include <avcodec.h>
#else
#include <ffmpeg/avcodec.h>
#endif
#include <string.h>
#include <gst/gst.h>
#include "gstffmpegallcodecmap.h"
/* Convert a FFMPEG codec ID and optional AVCodecContext
* to a GstCaps. If the context is omitted, no values for
* video/audio size will be included in the GstCaps
*/
GstCaps *
gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
AVCodecContext *context)
{
GstCaps *caps = NULL;
guint32 fourcc = 0;
g_return_val_if_fail (codec_id != CODEC_ID_NONE, NULL);
switch (codec_id) {
case CODEC_ID_MPEG1VIDEO:
fourcc = GST_MAKE_FOURCC('M','P','E','G');
if (context) {
caps = GST_CAPS_NEW ("ffmpeg_mpeg1video",
"video/mpeg",
"mpegversion", GST_PROPS_INT (1),
"systemstream", GST_PROPS_BOOLEAN (FALSE),
"width", GST_PROPS_INT (context->width),
"height", GST_PROPS_INT (context->height),
NULL
);
} else {
caps = GST_CAPS_NEW ("ffmpeg_mpeg1video",
"video/mpeg",
"mpegversion", GST_PROPS_INT (1),
"systemstream", GST_PROPS_BOOLEAN (FALSE),
"width", GST_PROPS_INT_RANGE (16, 4096),
"height", GST_PROPS_INT_RANGE (16, 4096),
NULL
);
}
break;
case CODEC_ID_H263P:
case CODEC_ID_H263I:
case CODEC_ID_H263:
fourcc = GST_MAKE_FOURCC('H','2','6','3');
caps = GST_CAPS_NEW ("ffmpeg_h263",
"video/H263",
NULL
);
break;
case CODEC_ID_RV10:
/* .. */
break;
case CODEC_ID_MP2:
case CODEC_ID_MP3LAME:
caps = GST_CAPS_NEW ("ffmpeg_mp2_mp3",
"audio/x-mp3",
NULL
);
break;
case CODEC_ID_VORBIS:
caps = GST_CAPS_NEW ("ffmpeg_vorbis",
"application/x-ogg",
NULL);
break;
case CODEC_ID_AC3:
caps = GST_CAPS_NEW ("ffmpeg_ac3",
"audio/ac3",
NULL);
break;
case CODEC_ID_MJPEG:
case CODEC_ID_MJPEGB:
fourcc = GST_MAKE_FOURCC ('M','J','P','G');
if (context) {
caps = GST_CAPS_NEW ("ffmpeg_mjpeg",
"video/jpeg",
"width", GST_PROPS_INT (context->width),
"height", GST_PROPS_INT (context->height),
NULL
);
} else {
caps = GST_CAPS_NEW ("ffmpeg_mjpeg",
"video/jpeg",
"width", GST_PROPS_INT_RANGE (16, 4096),
"height", GST_PROPS_INT_RANGE (16, 4096),
NULL
);
}
break;
case CODEC_ID_MPEG4:
fourcc = GST_MAKE_FOURCC ('D','I','V','X');
break;
case CODEC_ID_RAWVIDEO:
if (context) {
int bpp = 0, depth = 0, endianness = 0;
gulong g_mask = 0, r_mask = 0, b_mask = 0;
guint32 fmt = 0;
switch (context->pix_fmt) {
case PIX_FMT_YUV420P:
fmt = GST_MAKE_FOURCC ('I','4','2','0');
break;
case PIX_FMT_YUV422:
fmt = GST_MAKE_FOURCC ('Y','U','Y','2');
break;
case PIX_FMT_RGB24:
bpp = depth = 24;
endianness = G_BIG_ENDIAN;
r_mask = 0xff0000; g_mask = 0x00ff00; b_mask = 0x0000ff;
break;
case PIX_FMT_BGR24:
bpp = depth = 24;
endianness = G_LITTLE_ENDIAN;
r_mask = 0xff0000; g_mask = 0x00ff00; b_mask = 0x0000ff;
break;
case PIX_FMT_YUV422P:
/* .. */
break;
case PIX_FMT_YUV444P:
/* .. */
break;
case PIX_FMT_RGBA32:
bpp = depth = 32;
endianness = G_BYTE_ORDER;
r_mask = 0x00ff0000; g_mask = 0x0000ff00; b_mask = 0x000000ff;
break;
case PIX_FMT_YUV410P:
/* .. */
break;
case PIX_FMT_YUV411P:
fmt = GST_MAKE_FOURCC ('Y','4','1','P');
break;
case PIX_FMT_RGB565:
bpp = depth = 16;
endianness = G_BYTE_ORDER;
r_mask = 0xf800; g_mask = 0x07e0; b_mask = 0x001f;
break;
case PIX_FMT_RGB555:
bpp = 16; depth = 15;
endianness = G_BYTE_ORDER;
r_mask = 0x7c00; g_mask = 0x03e0; b_mask = 0x001f;
break;
default:
/* give up ... */
break;
}
if (bpp != 0) {
caps = GST_CAPS_NEW ("ffmpeg_rawvideo",
"video/raw",
"format", GST_PROPS_FOURCC (GST_MAKE_FOURCC ('R','G','B',' ')),
"width", GST_PROPS_INT (context->width),
"height", GST_PROPS_INT (context->height),
"bpp", GST_PROPS_INT (bpp),
"depth", GST_PROPS_INT (depth),
"red_mask", GST_PROPS_INT (r_mask),
"green_mask", GST_PROPS_INT (g_mask),
"blue_mask", GST_PROPS_INT (b_mask),
"endianness", GST_PROPS_INT (endianness),
NULL
);
} else if (fmt) {
caps = GST_CAPS_NEW ("ffmpeg_rawvideo",
"video/raw",
"format", GST_PROPS_FOURCC (fmt),
"width", GST_PROPS_INT (context->width),
"height", GST_PROPS_INT (context->height),
NULL
);
}
} else {
caps = GST_CAPS_NEW ("ffpeg_rawvideo",
"video/raw",
NULL
);
}
break;
case CODEC_ID_MSMPEG4V1:
fourcc = GST_MAKE_FOURCC ('M','P','G','4');
break;
case CODEC_ID_MSMPEG4V2:
fourcc = GST_MAKE_FOURCC ('M','P','4','2');
break;
case CODEC_ID_MSMPEG4V3:
fourcc = GST_MAKE_FOURCC ('M','P','4','3');
break;
case CODEC_ID_WMV1:
fourcc = GST_MAKE_FOURCC ('W','M','V','1');
case CODEC_ID_WMV2:
if (!fourcc) /* EVIL! */
fourcc = GST_MAKE_FOURCC ('W','M','V','2');
if (context) {
caps = GST_CAPS_NEW ("ffmpeg_wmv",
"video/wmv",
"width", GST_PROPS_INT (context->width),
"height", GST_PROPS_INT (context->height),
NULL
);
} else {
caps = GST_CAPS_NEW ("ffmpeg_wmv",
"video/wmv",
"width", GST_PROPS_INT_RANGE (16, 4096),
"height", GST_PROPS_INT_RANGE (16, 4096),
NULL
);
}
break;
case CODEC_ID_SVQ1:
/* .. */
break;
case CODEC_ID_DVVIDEO:
fourcc = GST_MAKE_FOURCC('D','V','S','D');
/* fall-through */
case CODEC_ID_DVAUDIO:
if (context) {
caps = GST_CAPS_NEW ("ffmpeg_dvvideo",
"video/dv",
"format", GST_PROPS_LIST (
GST_PROPS_STRING ("NTSC"),
GST_PROPS_STRING ("PAL")
),
"width", GST_PROPS_INT_RANGE (16, 4096),
"height", GST_PROPS_INT_RANGE (16, 4096),
NULL
);
} else {
caps = GST_CAPS_NEW ("ffmpeg_dvvideo",
"video/dv",
"format", GST_PROPS_STRING ("NTSC"), /* FIXME */
"width", GST_PROPS_INT (context->width),
"height", GST_PROPS_INT (context->height),
NULL
);
}
break;
case CODEC_ID_WMAV1:
case CODEC_ID_WMAV2:
caps = GST_CAPS_NEW ("ffmpeg_wma",
"audio/x-wma",
NULL
);
break;
case CODEC_ID_MACE3:
/* .. */
break;
case CODEC_ID_MACE6:
/* .. */
break;
case CODEC_ID_HUFFYUV:
fourcc = GST_MAKE_FOURCC('H','F','Y','U');
break;
case CODEC_ID_PCM_S16LE:
case CODEC_ID_PCM_S16BE:
case CODEC_ID_PCM_U16LE:
case CODEC_ID_PCM_U16BE:
case CODEC_ID_PCM_S8:
case CODEC_ID_PCM_U8:
case CODEC_ID_PCM_MULAW:
case CODEC_ID_PCM_ALAW:
do {
gint law = -1, width = 0, depth = 0, endianness = 0;
gboolean signedness = FALSE; /* blabla */
switch (codec_id) {
case CODEC_ID_PCM_S16LE:
law = 0; width = 16; depth = 16;
endianness = G_LITTLE_ENDIAN;
signedness = TRUE;
break;
case CODEC_ID_PCM_S16BE:
law = 0; width = 16; depth = 16;
endianness = G_BIG_ENDIAN;
signedness = TRUE;
break;
case CODEC_ID_PCM_U16LE:
law = 0; width = 16; depth = 16;
endianness = G_LITTLE_ENDIAN;
signedness = FALSE;
break;
case CODEC_ID_PCM_U16BE:
law = 0; width = 16; depth = 16;
endianness = G_BIG_ENDIAN;
signedness = FALSE;
break;
case CODEC_ID_PCM_S8:
law = 0; width = 8; depth = 8;
endianness = G_BYTE_ORDER;
signedness = TRUE;
break;
case CODEC_ID_PCM_U8:
law = 0; width = 8; depth = 8;
endianness = G_BYTE_ORDER;
signedness = FALSE;
break;
case CODEC_ID_PCM_MULAW:
law = 1; width = 8; depth = 8;
endianness = G_BYTE_ORDER;
signedness = FALSE;
break;
case CODEC_ID_PCM_ALAW:
law = 2; width = 8; depth = 8;
endianness = G_BYTE_ORDER;
signedness = FALSE;
break;
default:
g_assert(0); /* don't worry, we never get here */
break;
}
if (context) {
caps = GST_CAPS_NEW ("ffmpeg_pcmaudio",
"audio/raw",
"format", GST_PROPS_STRING ("int"),
"law", GST_PROPS_INT (law),
"width", GST_PROPS_INT (width),
"depth", GST_PROPS_INT (depth),
"endianness", GST_PROPS_INT (endianness),
"signed", GST_PROPS_BOOLEAN (signedness),
"rate", GST_PROPS_INT (context->sample_rate),
"channels", GST_PROPS_INT (context->channels),
NULL
);
} else {
caps = GST_CAPS_NEW ("ffmpeg_pcmaudio",
"audio/raw",
"format", GST_PROPS_STRING ("int"),
"law", GST_PROPS_INT (law),
"width", GST_PROPS_INT (width),
"depth", GST_PROPS_INT (depth),
"endianness", GST_PROPS_INT (endianness),
"signed", GST_PROPS_BOOLEAN (signedness),
"rate", GST_PROPS_INT_RANGE (1000, 48000),
"channels", GST_PROPS_INT_RANGE (1, 2),
NULL
);
}
} while (0);
break;
case CODEC_ID_ADPCM_IMA_QT:
/* .. */
break;
case CODEC_ID_ADPCM_IMA_WAV:
/* .. */
break;
case CODEC_ID_ADPCM_MS:
/* .. */
break;
default:
/* .. */
break;
}
if (fourcc) {
GstCaps *avi_caps;
if (context) {
avi_caps = GST_CAPS_NEW ("ffmpeg_mjpeg2",
"video/avi",
"format", GST_PROPS_STRING ("strf_vids"),
"compression", GST_PROPS_FOURCC (fourcc),
"width", GST_PROPS_INT (context->width),
"height", GST_PROPS_INT (context->height),
NULL
);
} else {
avi_caps = GST_CAPS_NEW ("ffmpeg_mjpeg2",
"video/avi",
"format", GST_PROPS_STRING ("strf_vids"),
"compression", GST_PROPS_FOURCC (fourcc),
"width", GST_PROPS_INT_RANGE (16, 4096),
"height", GST_PROPS_INT_RANGE (16, 4096),
NULL
);
}
if (caps)
caps = gst_caps_append(caps, avi_caps);
else
caps = avi_caps;
}
if (caps != NULL) {
char *str = g_strdup_printf("The caps that belongs to codec_id=%d", codec_id);
gst_caps_debug(caps, str);
g_free(str);
}
return caps;
}
/* Convert a GstCaps to a FFMPEG codec ID. Size et al.
* are omitted, that can be queried by the user itself,
* we're not eating the GstCaps or anything
* A pointer to an allocated context is also needed for
* optional extra info (not used yet, though)
*/
enum CodecID
gst_ffmpeg_caps_to_codecid (GstCaps *caps,
AVCodecContext *context)
{
enum CodecID id = CODEC_ID_NONE;
const gchar *mimetype;
gboolean video = FALSE;
g_return_val_if_fail (caps != NULL, CODEC_ID_NONE);
mimetype = gst_caps_get_mime(caps);
if (!strcmp(mimetype, "video/avi")) {
const gchar *format = NULL;
if (gst_caps_has_property(caps, "format")) {
gst_caps_get_string(caps, "format", &format);
}
if (format && !strcmp(format, "strf_vids")) {
guint32 compression = 0;
if (gst_caps_has_property(caps, "compression")) {
gst_caps_get_fourcc_int(caps, "compression", &compression);
}
switch (compression) {
case GST_MAKE_FOURCC('M','J','P','G'):
case GST_MAKE_FOURCC('J','P','E','G'):
case GST_MAKE_FOURCC('P','I','X','L'): /* these two are used by Pinnacle */
case GST_MAKE_FOURCC('V','I','X','L'): /* and Miro for Zoran/JPEG codecs */
id = CODEC_ID_MJPEG; /* or MJPEGB */
break;
case GST_MAKE_FOURCC('H','F','Y','U'):
id = CODEC_ID_HUFFYUV;
break;
case GST_MAKE_FOURCC('D','V','S','D'):
case GST_MAKE_FOURCC('d','v','s','d'):
id = CODEC_ID_DVVIDEO;
break;
case GST_MAKE_FOURCC('M','P','E','G'):
case GST_MAKE_FOURCC('M','P','G','I'):
id = CODEC_ID_MPEG1VIDEO;
break;
case GST_MAKE_FOURCC('H','2','6','3'):
case GST_MAKE_FOURCC('i','2','6','3'):
case GST_MAKE_FOURCC('L','2','6','3'):
case GST_MAKE_FOURCC('M','2','6','3'):
case GST_MAKE_FOURCC('V','D','O','W'):
case GST_MAKE_FOURCC('V','I','V','O'):
case GST_MAKE_FOURCC('x','2','6','3'):
id = CODEC_ID_H263; /* or H263[IP] */
break;
case GST_MAKE_FOURCC('d','i','v','x'):
case GST_MAKE_FOURCC('D','I','V','3'):
case GST_MAKE_FOURCC('D','I','V','4'):
case GST_MAKE_FOURCC('D','I','V','5'):
case GST_MAKE_FOURCC('M','P','4','3'):
id = CODEC_ID_MSMPEG4V3;
break;
case GST_MAKE_FOURCC('D','I','V','X'):
case GST_MAKE_FOURCC('D','X','5','0'):
case GST_MAKE_FOURCC('X','V','I','D'):
case GST_MAKE_FOURCC('x','v','i','d'):
id = CODEC_ID_MPEG4;
break;
case GST_MAKE_FOURCC('M','P','G','4'):
id = CODEC_ID_MSMPEG4V1;
break;
case GST_MAKE_FOURCC('M','P','4','2'):
id = CODEC_ID_MSMPEG4V2;
break;
case GST_MAKE_FOURCC('W','M','V','1'):
id = CODEC_ID_WMV1;
break;
case GST_MAKE_FOURCC('W','M','V','2'):
id = CODEC_ID_WMV2;
break;
}
video = TRUE;
} else if (format && !strcmp(format, "strf_auds")) {
/* .. */
}
} else if (!strcmp(mimetype, "video/raw")) {
id = CODEC_ID_RAWVIDEO; /* don't we need to provide more info here? */
if (context) {
gint depth = 0, endianness = 0;
guint32 fmt_fcc = 0;
gst_caps_get_fourcc_int(caps, "format", &fmt_fcc);
switch (fmt_fcc) {
case GST_MAKE_FOURCC('R','G','B',' '):
gst_caps_get_int(caps, "endianness", &endianness);
gst_caps_get_int(caps, "depth", &depth);
switch (depth) {
case 15:
context->pix_fmt = PIX_FMT_RGB555;
break;
case 16:
context->pix_fmt = PIX_FMT_RGB565;
break;
case 24:
if (endianness == G_BIG_ENDIAN) {
context->pix_fmt = PIX_FMT_RGB24;
} else {
context->pix_fmt = PIX_FMT_BGR24;
}
break;
case 32:
context->pix_fmt = PIX_FMT_RGBA32;
break;
}
break;
case GST_MAKE_FOURCC('Y','U','Y','2'):
context->pix_fmt = PIX_FMT_YUV422;
break;
case GST_MAKE_FOURCC('I','4','2','0'):
case GST_MAKE_FOURCC('I','Y','U','V'):
context->pix_fmt = PIX_FMT_YUV420P;
break;
case GST_MAKE_FOURCC('Y','4','1','P'):
context->pix_fmt = PIX_FMT_YUV411P;
break;
}
video = TRUE;
}
} else if (!strcmp(mimetype, "audio/raw")) {
gint law = -1, depth = 0, width = 0, endianness = 0;
gboolean signedness = FALSE; /* bla default value */
if (gst_caps_has_property(caps, "signedness")) {
gst_caps_get_int(caps, "endianness", &endianness);
gst_caps_get_boolean(caps, "endianness", &signedness);
gst_caps_get_int(caps, "law", &law);
gst_caps_get_int(caps, "width", &width);
gst_caps_get_int(caps, "depth", &depth);
if (context) {
context->sample_rate = 0;
context->channels = 0;
gst_caps_get_int(caps, "channels", &context->channels);
gst_caps_get_int(caps, "rate", &context->sample_rate);
}
g_return_val_if_fail(depth == width, CODEC_ID_NONE);
switch (law) {
case 0:
switch (depth) {
case 8:
if (signedness) {
id = CODEC_ID_PCM_S8;
} else {
id = CODEC_ID_PCM_U8;
}
break;
case 16:
switch (endianness) {
case G_BIG_ENDIAN:
if (signedness) {
id = CODEC_ID_PCM_S16BE;
} else {
id = CODEC_ID_PCM_U16BE;
}
break;
case G_LITTLE_ENDIAN:
if (signedness) {
id = CODEC_ID_PCM_S16LE;
} else {
id = CODEC_ID_PCM_U16LE;
}
break;
}
break;
}
break;
case 1:
id = CODEC_ID_PCM_MULAW;
break;
case 2:
id = CODEC_ID_PCM_ALAW;
break;
}
}
} else if (!strcmp(mimetype, "video/dv")) {
id = CODEC_ID_DVVIDEO; /* or DVAUDIO */
video = TRUE;
} else if (!strcmp(mimetype, "video/H263")) {
id = CODEC_ID_H263; /* or H263[IP] */
video = TRUE;
} else if (!strcmp(mimetype, "video/mpeg")) {
gboolean sys_strm = TRUE;
gint mpegversion = 0;
if (gst_caps_has_property(caps, "systemstream")) {
gst_caps_get_boolean(caps, "systemstream", &sys_strm);
}
if (!sys_strm && gst_caps_has_property(caps, "mpegversion")) {
gst_caps_get_int(caps, "mpegversion", &mpegversion);
if (mpegversion == 1) {
id = CODEC_ID_MPEG1VIDEO;
}
}
video = TRUE;
} else if (!strcmp(mimetype, "video/jpeg")) {
id = CODEC_ID_MJPEG;
video = TRUE;
} else if (!strcmp(mimetype, "video/wmv")) {
id = CODEC_ID_WMV2; /* or WMV1 */
video = TRUE;
} else if (!strcmp(mimetype, "application/x-ogg")) {
id = CODEC_ID_VORBIS;
} else if (!strcmp(mimetype, "audio/x-mp3")) {
id = CODEC_ID_MP3LAME; /* or MP2 */
} else if (!strcmp(mimetype, "audio/x-wma")) {
id = CODEC_ID_WMAV2; /* or WMAV1 */
} else if (!strcmp(mimetype, "audio/ac3")) {
id = CODEC_ID_AC3;
}
if (video && context) {
if (gst_caps_has_property(caps, "width"))
gst_caps_get_int(caps, "width", &context->width);
if (gst_caps_has_property(caps, "height"))
gst_caps_get_int(caps, "height", &context->height);
/* framerate (context->frame_rate)? but then, we'd need a GstPad* */
context->codec_type = CODEC_TYPE_VIDEO;
} else {
context->codec_type = CODEC_TYPE_AUDIO;
}
context->codec_id = id;
if (id != CODEC_ID_NONE) {
char *str = g_strdup_printf("The id=%d belongs to this caps", id);
gst_caps_debug(caps, str);
g_free(str);
}
return id;
}

View file

@@ -1,5 +1,7 @@
/* GStreamer
* Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
* This file:
* Copyright (c) 2002-2003 Ronald Bultje <rbultje@ronald.bitfreak.net>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -23,283 +25,513 @@
#else
#include <ffmpeg/avcodec.h>
#endif
#include <string.h>
#include <gst/gst.h>
#include "gstffmpegcodecmap.h"
/* this macro makes a caps with fixed or unfixed width/height
* properties depending on whether we've got a context.
*
* See below for why we use this.
*/
#define GST_FF_VID_CAPS_NEW(name, mimetype, props...) \
(context != NULL) ? \
GST_CAPS_NEW (name, \
mimetype, \
"width", GST_PROPS_INT (context->width), \
"height", GST_PROPS_INT (context->height),\
##props) \
: \
GST_CAPS_NEW (name, \
mimetype, \
"width", GST_PROPS_INT_RANGE (16, 4096), \
"height", GST_PROPS_INT_RANGE (16, 4096), \
##props)
/* same for audio - now with channels/sample rate
*/
#define GST_FF_AUD_CAPS_NEW(name, mimetype, props...) \
(context != NULL) ? \
GST_CAPS_NEW (name, \
mimetype, \
"rate", GST_PROPS_INT (context->sample_rate), \
"channels", GST_PROPS_INT (context->channels), \
##props) \
: \
GST_CAPS_NEW (name, \
mimetype, \
"rate", GST_PROPS_INT_RANGE (8000, 96000), \
"channels", GST_PROPS_INT_RANGE (1, 2), \
##props)
/* Convert a FFMPEG codec ID and optional AVCodecContext
* to a GstCaps. If the context is omitted, no fixed values
* for video/audio size will be included in the GstCaps
*
* CodecID is primarily meant for compressed data GstCaps!
*/
GstCaps *
gst_ffmpegcodec_codec_context_to_caps (AVCodecContext *context, int codec_id)
gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
AVCodecContext *context)
{
GstCaps *caps = NULL;
switch (codec_id) {
case CODEC_ID_NONE:
return GST_CAPS_NEW ("ffmpeg_none",
"unknown/unknown",
NULL);
break;
case CODEC_ID_MPEG1VIDEO:
return GST_CAPS_NEW ("ffmpeg_mpeg1video",
"video/mpeg",
"mpegversion", GST_PROPS_INT (1),
"systemstream", GST_PROPS_BOOLEAN (FALSE)
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_mpeg1video",
"video/mpeg",
"mpegversion", GST_PROPS_INT (1),
"systemstream", GST_PROPS_BOOLEAN (FALSE)
);
break;
case CODEC_ID_H263P:
case CODEC_ID_H263I:
case CODEC_ID_H263:
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_h263",
"video/h263"
);
break;
case CODEC_ID_RV10:
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_rv10",
"video/realvideo"
);
break;
case CODEC_ID_MP2:
caps = GST_CAPS_NEW ("ffmpeg_mp2",
"audio/x-mp3",
"layer", GST_PROPS_INT (2)
);
break;
case CODEC_ID_MP3LAME:
caps = GST_CAPS_NEW ("ffmpeg_mp3",
"audio/x-mp3",
"layer", GST_PROPS_INT (3)
);
break;
case CODEC_ID_VORBIS:
caps = GST_CAPS_NEW ("ffmpeg_vorbis",
"application/x-ogg",
NULL
);
break;
case CODEC_ID_H263:
return GST_CAPS_NEW ("ffmpeg_h263",
"video/H263",
NULL);
break;
case CODEC_ID_RV10:
return GST_CAPS_NEW ("ffmpeg_rv10",
"video/x-rv10",
NULL);
break;
case CODEC_ID_MP2:
return GST_CAPS_NEW ("ffmpeg_mp2",
"audio/x-mp3",
NULL);
break;
case CODEC_ID_MP3LAME:
return GST_CAPS_NEW ("ffmpeg_mp3",
"audio/x-mp3",
NULL);
break;
case CODEC_ID_VORBIS:
return GST_CAPS_NEW ("ffmpeg_vorbis",
"application/x-ogg",
NULL);
break;
case CODEC_ID_AC3:
return GST_CAPS_NEW ("ffmpeg_ac3",
caps = GST_CAPS_NEW ("ffmpeg_ac3",
"audio/ac3",
NULL);
NULL
);
break;
case CODEC_ID_MJPEG:
return GST_CAPS_NEW ("ffmpeg_mjpeg",
"video/x-mjpeg",
NULL);
break;
case CODEC_ID_MJPEGB:
return GST_CAPS_NEW ("ffmpeg_mjpeg",
"video/x-mjpegb",
NULL);
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_mjpeg",
"video/jpeg"
);
break;
case CODEC_ID_MPEG4:
if (context) {
return GST_CAPS_NEW ("ffmpeg_mpeg4",
"video/avi",
"format", GST_PROPS_STRING ("strf_vids"),
"compression", GST_PROPS_FOURCC (context->codec_tag),
"width", GST_PROPS_INT (context->width),
"height", GST_PROPS_INT (context->height)
);
}
else {
return GST_CAPS_NEW ("ffmpeg_mpeg4",
"video/avi",
"format", GST_PROPS_STRING ("strf_vids"),
"compression", GST_PROPS_FOURCC (GST_STR_FOURCC ("DIV3")),
"width", GST_PROPS_INT_RANGE (0, 4096),
"height", GST_PROPS_INT_RANGE (0, 4096)
);
}
break;
case CODEC_ID_RAWVIDEO:
return GST_CAPS_NEW ("ffmpeg_rawvideo",
"video/raw",
NULL);
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_mpeg4",
"video/mpeg",
"mpegversion", GST_PROPS_INT (4),
"systemstream", GST_PROPS_BOOLEAN (FALSE)
);
caps = gst_caps_append(caps,
GST_FF_VID_CAPS_NEW ("ffmpeg_divx",
"video/divx",
"divxversion", GST_PROPS_INT (5)
));
caps = gst_caps_append(caps,
GST_FF_VID_CAPS_NEW ("ffmpeg_xvid",
"video/xvid"
));
break;
case CODEC_ID_MSMPEG4V1:
if (context) {
return GST_CAPS_NEW ("ffmpeg_msmpeg4v1",
"video/avi",
"format", GST_PROPS_STRING ("strf_vids"),
"compression", GST_PROPS_FOURCC (GST_STR_FOURCC ("MPG4")),
"width", GST_PROPS_INT (context->width),
"height", GST_PROPS_INT (context->height)
);
}
else {
return GST_CAPS_NEW ("ffmpeg_msmpeg4v1",
"video/avi",
"format", GST_PROPS_STRING ("strf_vids"),
"compression", GST_PROPS_FOURCC (GST_STR_FOURCC ("MPG4")),
"width", GST_PROPS_INT_RANGE (0, 4096),
"height", GST_PROPS_INT_RANGE (0, 4096)
);
}
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_msmpeg4v1",
"video/x-msmpeg",
"mpegversion", GST_PROPS_INT (41)
);
break;
case CODEC_ID_MSMPEG4V2:
if (context) {
return GST_CAPS_NEW ("ffmpeg_msmpeg4v2",
"video/avi",
"format", GST_PROPS_STRING ("strf_vids"),
"compression", GST_PROPS_FOURCC (GST_STR_FOURCC ("MP42")),
"width", GST_PROPS_INT (context->width),
"height", GST_PROPS_INT (context->height)
);
}
else {
return GST_CAPS_NEW ("ffmpeg_msmpeg4v2",
"video/avi",
"format", GST_PROPS_STRING ("strf_vids"),
"compression", GST_PROPS_FOURCC (GST_STR_FOURCC ("MP42")),
"width", GST_PROPS_INT_RANGE (0, 4096),
"height", GST_PROPS_INT_RANGE (0, 4096)
);
}
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_msmpeg4v2",
"video/x-msmpeg",
"mpegversion", GST_PROPS_INT (42)
);
break;
case CODEC_ID_MSMPEG4V3:
if (context) {
return GST_CAPS_NEW ("ffmpeg_msmpeg4v3",
"video/avi",
"format", GST_PROPS_STRING ("strf_vids"),
"compression", GST_PROPS_FOURCC (GST_STR_FOURCC ("DIV3")),
"width", GST_PROPS_INT (context->width),
"height", GST_PROPS_INT (context->height)
);
}
else {
return GST_CAPS_NEW ("ffmpeg_msmpeg4v3",
"video/avi",
"format", GST_PROPS_STRING ("strf_vids"),
"compression", GST_PROPS_FOURCC (GST_STR_FOURCC ("DIV3")),
"width", GST_PROPS_INT_RANGE (0, 4096),
"height", GST_PROPS_INT_RANGE (0, 4096)
);
}
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_msmpeg4v3",
"video/x-msmpeg",
"mpegversion", GST_PROPS_INT (43)
);
break;
case CODEC_ID_WMV1:
if (context) {
return GST_CAPS_NEW ("ffmpeg_wmv1",
"video/avi",
"format", GST_PROPS_STRING ("strf_vids"),
"compression", GST_PROPS_FOURCC (GST_STR_FOURCC ("WMV1")),
"width", GST_PROPS_INT (context->width),
"height", GST_PROPS_INT (context->height)
);
}
else {
return GST_CAPS_NEW ("ffmpeg_wmv1",
"video/x-wmv1",
NULL
);
}
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_wmv1",
"video/wmv",
"wmvversion", GST_PROPS_INT (1)
);
break;
case CODEC_ID_WMV2:
return GST_CAPS_NEW ("ffmpeg_wmv2",
"unknown/unknown",
NULL);
break;
case CODEC_ID_H263P:
return GST_CAPS_NEW ("ffmpeg_h263p",
"unknown/unknown",
NULL);
break;
case CODEC_ID_H263I:
return GST_CAPS_NEW ("ffmpeg_h263i",
"unknown/unknown",
NULL);
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_wmv2",
"video/wmv",
"wmvversion", GST_PROPS_INT (2)
);
break;
case CODEC_ID_SVQ1:
return GST_CAPS_NEW ("ffmpeg_svq1",
"unknown/unknown",
NULL);
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_svq1",
"video/x-svq",
"svqversion", GST_PROPS_INT (1)
);
break;
case CODEC_ID_SVQ3:
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_svq3",
"video/x-svq",
"svqversion", GST_PROPS_INT (3)
);
break;
case CODEC_ID_DVAUDIO: /* ??? */
case CODEC_ID_DVVIDEO:
return GST_CAPS_NEW ("ffmpeg_dvvideo",
"unknown/unknown",
NULL);
break;
case CODEC_ID_DVAUDIO:
return GST_CAPS_NEW ("ffmpeg_dvaudio",
"unknown/unknown",
NULL);
if (!context) {
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_dvvideo",
"video/dv",
"format", GST_PROPS_LIST (
GST_PROPS_STRING ("NTSC"),
GST_PROPS_STRING ("PAL")
)
);
} else {
GstPropsEntry *normentry;
if (context->height == 576) {
normentry = gst_props_entry_new("format", GST_PROPS_STRING ("PAL"));
} else {
normentry = gst_props_entry_new("format", GST_PROPS_STRING ("NTSC"));
}
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_dvvideo",
"video/dv",
);
gst_props_add_entry(caps->properties, normentry);
}
break;
case CODEC_ID_WMAV1:
return GST_CAPS_NEW ("ffmpeg_wmav1",
"unknown/unknown",
NULL);
caps = GST_CAPS_NEW ("ffmpeg_wma1",
"audio/wma",
"wmaversion", GST_PROPS_INT (1)
);
break;
case CODEC_ID_WMAV2:
return GST_CAPS_NEW ("ffmpeg_wmav2",
"unknown/unknown",
NULL);
caps = GST_CAPS_NEW ("ffmpeg_wma2",
"audio/wma",
"wmaversion", GST_PROPS_INT (2)
);
break;
case CODEC_ID_MACE3:
return GST_CAPS_NEW ("ffmpeg_mace3",
"unknown/unknown",
NULL);
/* .. */
break;
case CODEC_ID_MACE6:
return GST_CAPS_NEW ("ffmpeg_mace6",
"unknown/unknown",
NULL);
/* .. */
break;
case CODEC_ID_HUFFYUV:
return GST_CAPS_NEW ("ffmpeg_huffyuv",
"video/x-huffyuv",
NULL);
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_huffyuv",
"video/huffyuv"
);
break;
/* various pcm "codecs" */
case CODEC_ID_PCM_S16LE:
return GST_CAPS_NEW ("ffmpeg_s16le",
"unknown/unknown",
NULL);
case CODEC_ID_CYUV:
/* .. */
break;
case CODEC_ID_PCM_S16BE:
return GST_CAPS_NEW ("ffmpeg_s16be",
"unknown/unknown",
NULL);
case CODEC_ID_H264:
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_h264",
"video/h264"
);
break;
case CODEC_ID_PCM_U16LE:
return GST_CAPS_NEW ("ffmpeg_u16le",
"unknown/unknown",
NULL);
case CODEC_ID_INDEO3:
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_indeo3",
"video/indeo3"
);
break;
case CODEC_ID_PCM_U16BE:
return GST_CAPS_NEW ("ffmpeg_u16be",
"unknown/unknown",
NULL);
case CODEC_ID_VP3:
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_vp3",
"video/vp3"
);
caps = gst_caps_append(caps,
GST_FF_VID_CAPS_NEW ("ffmpeg_theora",
"video/x-theora"
));
break;
case CODEC_ID_PCM_S8:
return GST_CAPS_NEW ("ffmpeg_s8",
"unknown/unknown",
NULL);
case CODEC_ID_AAC:
/* .. */
break;
case CODEC_ID_PCM_U8:
return GST_CAPS_NEW ("ffmpeg_u8",
"unknown/unknown",
NULL);
case CODEC_ID_MPEG4AAC:
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_mpeg4aac",
"video/mpeg",
"systemstream", GST_PROPS_BOOLEAN (FALSE),
"mpegversion", GST_PROPS_INT (4)
);
break;
case CODEC_ID_PCM_MULAW:
return GST_CAPS_NEW ("ffmpeg_mulaw",
"unknown/unknown",
NULL);
case CODEC_ID_ASV1:
/* .. */
break;
case CODEC_ID_PCM_ALAW:
return GST_CAPS_NEW ("ffmpeg_alaw",
"unknown/unknown",
NULL);
break;
/* various adpcm codecs */
case CODEC_ID_ADPCM_IMA_QT:
return GST_CAPS_NEW ("ffmpeg_adpcm_ima_qt",
"unknown/unknown",
NULL);
/* .. */
break;
case CODEC_ID_ADPCM_IMA_WAV:
return GST_CAPS_NEW ("ffmpeg_adpcm_ima_wav",
"unknown/unknown",
NULL);
/* .. */
break;
case CODEC_ID_ADPCM_MS:
return GST_CAPS_NEW ("ffmpeg_adpcm_ms",
"unknown/unknown",
NULL);
/* .. */
break;
case CODEC_ID_AMR_NB:
/* .. */
break;
default:
g_warning ("no caps found for codec id %d\n", codec_id);
/* .. */
break;
}
return NULL;
if (caps != NULL) {
char *str = g_strdup_printf("The caps that belongs to codec_id=%d",
codec_id);
gst_caps_debug(caps, str);
g_free(str);
}
return caps;
}
/* Convert a FFMPEG Pixel Format and optional AVCodecContext
* to a GstCaps. If the context is omitted, no fixed values
* for video/audio size will be included in the GstCaps
*
* See below for usefulness
*/
static GstCaps *
gst_ffmpeg_pixfmt_to_caps (enum PixelFormat pix_fmt,
AVCodecContext *context)
{
GstCaps *caps = NULL;
int bpp = 0, depth = 0, endianness = 0;
gulong g_mask = 0, r_mask = 0, b_mask = 0;
guint32 fmt = 0;
switch (pix_fmt) {
case PIX_FMT_YUV420P:
fmt = GST_MAKE_FOURCC ('I','4','2','0');
break;
case PIX_FMT_YUV422:
fmt = GST_MAKE_FOURCC ('Y','U','Y','2');
break;
case PIX_FMT_RGB24:
bpp = depth = 24;
endianness = G_BIG_ENDIAN;
r_mask = 0xff0000; g_mask = 0x00ff00; b_mask = 0x0000ff;
break;
case PIX_FMT_BGR24:
bpp = depth = 24;
endianness = G_LITTLE_ENDIAN;
r_mask = 0xff0000; g_mask = 0x00ff00; b_mask = 0x0000ff;
break;
case PIX_FMT_YUV422P:
/* .. */
break;
case PIX_FMT_YUV444P:
/* .. */
break;
case PIX_FMT_RGBA32:
bpp = depth = 32;
endianness = G_BYTE_ORDER;
r_mask = 0x00ff0000; g_mask = 0x0000ff00; b_mask = 0x000000ff;
break;
case PIX_FMT_YUV410P:
/* .. */
break;
case PIX_FMT_YUV411P:
fmt = GST_MAKE_FOURCC ('Y','4','1','P');
break;
case PIX_FMT_RGB565:
bpp = depth = 16;
endianness = G_BYTE_ORDER;
r_mask = 0xf800; g_mask = 0x07e0; b_mask = 0x001f;
break;
case PIX_FMT_RGB555:
bpp = 16; depth = 15;
endianness = G_BYTE_ORDER;
r_mask = 0x7c00; g_mask = 0x03e0; b_mask = 0x001f;
break;
default:
/* give up ... */
break;
}
if (bpp != 0) {
fmt = GST_MAKE_FOURCC ('R','G','B',' ');
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_rawvideo",
"video/raw",
"format", GST_PROPS_FOURCC (fmt),
"bpp", GST_PROPS_INT (bpp),
"depth", GST_PROPS_INT (depth),
"red_mask", GST_PROPS_INT (r_mask),
"green_mask", GST_PROPS_INT (g_mask),
"blue_mask", GST_PROPS_INT (b_mask),
"endianness", GST_PROPS_INT (endianness)
);
} else if (fmt) {
caps = GST_FF_VID_CAPS_NEW ("ffmpeg_rawvideo",
"video/raw",
"format", GST_PROPS_FOURCC (fmt)
);
}
if (caps != NULL) {
char *str = g_strdup_printf("The caps that belongs to pix_fmt=%d",
pix_fmt);
gst_caps_debug(caps, str);
g_free(str);
}
return caps;
}
/* Convert a FFMPEG Sample Format and optional AVCodecContext
* to a GstCaps. If the context is omitted, no fixed values
* for video/audio size will be included in the GstCaps
*
* See below for usefulness
*/
static GstCaps *
gst_ffmpeg_smpfmt_to_caps (enum SampleFormat sample_fmt,
AVCodecContext *context)
{
GstCaps *caps = NULL;
int bpp = 0;
gboolean signedness = FALSE;
switch (sample_fmt) {
case SAMPLE_FMT_S16:
signedness = TRUE;
bpp = 16;
break;
default:
/* .. */
break;
}
if (bpp) {
caps = GST_FF_AUD_CAPS_NEW ("ffmpeg_rawaudio",
"audio/raw",
"signed", GST_PROPS_BOOLEAN (signedness),
"endianness", GST_PROPS_INT (G_BYTE_ORDER),
"width", GST_PROPS_INT (bpp),
"depth", GST_PROPS_INT (bpp),
"law", GST_PROPS_INT (0),
"format", GST_PROPS_STRING ("int")
);
}
if (caps != NULL) {
char *str = g_strdup_printf("The caps that belongs to sample_fmt=%d",
sample_fmt);
gst_caps_debug(caps, str);
g_free(str);
}
return caps;
}
/* Convert a FFMPEG codec Type and optional AVCodecContext
* to a GstCaps. If the context is omitted, no fixed values
* for video/audio size will be included in the GstCaps
*
* CodecType is primarily meant for uncompressed data GstCaps!
*/
GstCaps *
gst_ffmpeg_codectype_to_caps (enum CodecType codec_type,
AVCodecContext *context)
{
GstCaps *caps = NULL;
switch (codec_type) {
case CODEC_TYPE_VIDEO:
if (context) {
caps = gst_ffmpeg_pixfmt_to_caps (context->pix_fmt, context);
} else {
GstCaps *temp;
enum PixelFormat i;
for (i = 0; i < PIX_FMT_NB; i++) {
temp = gst_ffmpeg_pixfmt_to_caps (i, NULL);
if (temp != NULL) {
caps = gst_caps_append (caps, temp);
}
}
}
break;
case CODEC_TYPE_AUDIO:
if (context) {
caps = gst_ffmpeg_smpfmt_to_caps (context->sample_fmt, context);
} else {
GstCaps *temp;
enum SampleFormat i;
for (i = 0; i <= SAMPLE_FMT_S16; i++) {
temp = gst_ffmpeg_smpfmt_to_caps (i, NULL);
if (temp != NULL) {
caps = gst_caps_append (caps, temp);
}
}
}
break;
default:
/* .. */
break;
}
if (caps != NULL) {
char *str = g_strdup_printf("The caps that belongs to codec_type=%d",
codec_type);
gst_caps_debug(caps, str);
g_free(str);
}
return caps;
}

View file

@@ -17,24 +17,30 @@
* Boston, MA 02111-1307, USA.
*/
#ifndef __GST_FFMPEGALL_CODECMAP_H__
#define __GST_FFMPEGALL_CODECMAP_H__
#ifndef __GST_FFMPEG_CODECMAP_H__
#define __GST_FFMPEG_CODECMAP_H__
#include "config.h"
#ifdef HAVE_FFMPEG_UNINSTALLED
#include <avcodec.h>
#else
#include <ffmpeg/avcodec.h>
#endif
#include <string.h>
#include <gst/gst.h>
/* _codecid_to_caps () gets the GstCaps that belongs to
* a certain CodecID for a pad with compressed data.
*/
GstCaps *
gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
AVCodecContext *context);
gst_ffmpeg_codecid_to_caps (enum CodecID codec_id,
AVCodecContext *context);
enum CodecID
gst_ffmpeg_caps_to_codecid (GstCaps *caps,
AVCodecContext *context);
/* _codectype_to_caps () gets the GstCaps that belongs to
* a certain CodecType for a pad with uncompressed data.
*/
#endif /* __GST_FFMPEGALL_CODECMAP_H__ */
GstCaps *
gst_ffmpeg_codectype_to_caps (enum CodecType codec_type,
AVCodecContext *context);
#endif /* __GST_FFMPEG_CODECMAP_H__ */

View file

@@ -17,9 +17,11 @@
* Boston, MA 02111-1307, USA.
*/
#include "config.h"
#include <assert.h>
#include <string.h>
#include "config.h"
#ifdef HAVE_FFMPEG_UNINSTALLED
#include <avcodec.h>
#else
@@ -28,9 +30,8 @@
#include <gst/gst.h>
extern GstCaps* gst_ffmpegcodec_codec_context_to_caps (AVCodecContext *ctx, int id);
#include "gstffmpegcodecmap.h"
typedef struct _GstFFMpegDec GstFFMpegDec;
struct _GstFFMpegDec {
@@ -42,6 +43,7 @@ struct _GstFFMpegDec {
AVCodecContext *context;
AVFrame *picture;
gboolean opened;
};
typedef struct _GstFFMpegDecClass GstFFMpegDecClass;
@@ -50,12 +52,12 @@ struct _GstFFMpegDecClass {
GstElementClass parent_class;
AVCodec *in_plugin;
GstPadTemplate *templ;
GstPadTemplate *srctempl, *sinktempl;
};
typedef struct {
AVCodec *in_plugin;
GstPadTemplate *templ;
GstPadTemplate *srctempl, *sinktempl;
} GstFFMpegClassParams;
#define GST_TYPE_FFMPEGDEC \
@@ -79,54 +81,26 @@ enum {
/* FILL ME */
};
/* This factory is much simpler, and defines the source pad. */
GST_PAD_TEMPLATE_FACTORY (gst_ffmpegdec_audio_src_factory,
"src",
GST_PAD_SRC,
GST_PAD_ALWAYS,
GST_CAPS_NEW (
"ffmpegdec_src",
"audio/raw",
"format", GST_PROPS_STRING ("int"),
"law", GST_PROPS_INT (0),
"endianness", GST_PROPS_INT (G_BYTE_ORDER),
"signed", GST_PROPS_BOOLEAN (TRUE),
"width", GST_PROPS_INT (16),
"depth", GST_PROPS_INT (16),
"rate", GST_PROPS_INT_RANGE (8000, 96000),
"channels", GST_PROPS_INT_RANGE (1, 2)
)
)
/* This factory is much simpler, and defines the source pad. */
GST_PAD_TEMPLATE_FACTORY (gst_ffmpegdec_video_src_factory,
"src",
GST_PAD_SRC,
GST_PAD_ALWAYS,
GST_CAPS_NEW (
"ffmpegdec_src",
"video/raw",
"format", GST_PROPS_LIST (
GST_PROPS_FOURCC (GST_STR_FOURCC ("I420"))
),
"width", GST_PROPS_INT_RANGE (16, 4096),
"height", GST_PROPS_INT_RANGE (16, 4096)
)
)
static GHashTable *global_plugins;
/* A number of function prototypes are given so we can refer to them later. */
static void gst_ffmpegdec_class_init (GstFFMpegDecClass *klass);
static void gst_ffmpegdec_init (GstFFMpegDec *ffmpegdec);
static void gst_ffmpegdec_dispose (GObject *object);
static void gst_ffmpegdec_chain_audio (GstPad *pad, GstBuffer *buffer);
static void gst_ffmpegdec_chain_video (GstPad *pad, GstBuffer *buffer);
static GstPadLinkReturn gst_ffmpegdec_connect (GstPad *pad,
GstCaps *caps);
static void gst_ffmpegdec_chain (GstPad *pad,
GstBuffer *buffer);
static void gst_ffmpegdec_set_property (GObject *object, guint prop_id, const GValue *value,
GParamSpec *pspec);
static void gst_ffmpegdec_get_property (GObject *object, guint prop_id, GValue *value,
GParamSpec *pspec);
static GstElementStateReturn
gst_ffmpegdec_change_state (GstElement *element);
/* some sort of bufferpool handling, but different */
static int gst_ffmpegdec_get_buffer (AVCodecContext *context,
AVFrame *picture);
static void gst_ffmpegdec_release_buffer (AVCodecContext *context,
AVFrame *picture);
static GstElementClass *parent_class = NULL;
@@ -148,94 +122,283 @@ gst_ffmpegdec_class_init (GstFFMpegDecClass *klass)
GINT_TO_POINTER (G_OBJECT_CLASS_TYPE (gobject_class)));
klass->in_plugin = params->in_plugin;
klass->templ = params->templ;
klass->srctempl = params->srctempl;
klass->sinktempl = params->sinktempl;
gobject_class->set_property = gst_ffmpegdec_set_property;
gobject_class->get_property = gst_ffmpegdec_get_property;
}
static GstPadLinkReturn
gst_ffmpegdec_sinkconnect (GstPad *pad, GstCaps *caps)
{
GstFFMpegDec *ffmpegdec = (GstFFMpegDec *)(gst_pad_get_parent (pad));
GstFFMpegDecClass *oclass = (GstFFMpegDecClass*)(G_OBJECT_GET_CLASS (ffmpegdec));
if (!GST_CAPS_IS_FIXED (caps))
return GST_PAD_LINK_DELAYED;
if (gst_caps_has_property_typed (caps, "width", GST_PROPS_INT_TYPE))
gst_caps_get_int (caps, "width", &ffmpegdec->context->width);
if (gst_caps_has_property_typed (caps, "height", GST_PROPS_INT_TYPE))
gst_caps_get_int (caps, "height", &ffmpegdec->context->height);
ffmpegdec->context->pix_fmt = PIX_FMT_YUV420P;
ffmpegdec->context->bit_rate = 0;
/* FIXME bug in ffmpeg */
/*
if (avcodec_open (ffmpegdec->context, avcodec_find_encoder(CODEC_ID_MPEG1VIDEO)) <0 ) {
g_warning ("ffmpegdec: could not open codec");
return GST_PAD_LINK_REFUSED;
}
*/
if (avcodec_open (ffmpegdec->context, oclass->in_plugin) < 0) {
g_warning ("ffmpegdec: could not open codec");
return GST_PAD_LINK_REFUSED;
}
return GST_PAD_LINK_OK;
gobject_class->dispose = gst_ffmpegdec_dispose;
gstelement_class->change_state = gst_ffmpegdec_change_state;
}
static void
gst_ffmpegdec_init(GstFFMpegDec *ffmpegdec)
gst_ffmpegdec_init (GstFFMpegDec *ffmpegdec)
{
GstFFMpegDecClass *oclass = (GstFFMpegDecClass*)(G_OBJECT_GET_CLASS (ffmpegdec));
ffmpegdec->context = avcodec_alloc_context();
ffmpegdec->sinkpad = gst_pad_new_from_template (oclass->templ, "sink");
gst_pad_set_link_function (ffmpegdec->sinkpad, gst_ffmpegdec_sinkconnect);
if (oclass->in_plugin->type == CODEC_TYPE_VIDEO) {
ffmpegdec->srcpad = gst_pad_new_from_template (
GST_PAD_TEMPLATE_GET (gst_ffmpegdec_video_src_factory), "src");
gst_pad_set_chain_function (ffmpegdec->sinkpad, gst_ffmpegdec_chain_video);
}
else if (oclass->in_plugin->type == CODEC_TYPE_AUDIO) {
ffmpegdec->srcpad = gst_pad_new_from_template (
GST_PAD_TEMPLATE_GET (gst_ffmpegdec_audio_src_factory), "src");
gst_pad_set_chain_function (ffmpegdec->sinkpad, gst_ffmpegdec_chain_audio);
}
/* setup pads */
ffmpegdec->sinkpad = gst_pad_new_from_template (oclass->sinktempl, "sink");
gst_pad_set_link_function (ffmpegdec->sinkpad, gst_ffmpegdec_connect);
gst_pad_set_chain_function (ffmpegdec->sinkpad, gst_ffmpegdec_chain);
ffmpegdec->srcpad = gst_pad_new_from_template (oclass->srctempl, "src");
gst_element_add_pad (GST_ELEMENT (ffmpegdec), ffmpegdec->sinkpad);
gst_element_add_pad (GST_ELEMENT (ffmpegdec), ffmpegdec->srcpad);
ffmpegdec->picture = g_malloc0 (sizeof (AVFrame));
/* some ffmpeg data */
ffmpegdec->context = avcodec_alloc_context();
ffmpegdec->picture = avcodec_alloc_frame();
ffmpegdec->opened = FALSE;
}
static void
gst_ffmpegdec_chain_audio (GstPad *pad, GstBuffer *inbuf)
gst_ffmpegdec_dispose (GObject *object)
{
/*GstFFMpegDec *ffmpegdec = (GstFFMpegDec *)(gst_pad_get_parent (pad)); */
gpointer data;
gint size;
GstFFMpegDec *ffmpegdec = (GstFFMpegDec *) object;
/* close old session */
if (ffmpegdec->opened) {
avcodec_close (ffmpegdec->context);
ffmpegdec->opened = FALSE;
}
data = GST_BUFFER_DATA (inbuf);
size = GST_BUFFER_SIZE (inbuf);
/* clean up remaining allocated data */
av_free (ffmpegdec->context);
av_free (ffmpegdec->picture);
}
GST_DEBUG (0, "got buffer %p %d", data, size);
static GstPadLinkReturn
gst_ffmpegdec_connect (GstPad *pad,
GstCaps *caps)
{
GstFFMpegDec *ffmpegdec = (GstFFMpegDec *)(gst_pad_get_parent (pad));
GstFFMpegDecClass *oclass = (GstFFMpegDecClass*)(G_OBJECT_GET_CLASS (ffmpegdec));
gst_buffer_unref (inbuf);
/* we want fixed caps */
if (!GST_CAPS_IS_FIXED (caps))
return GST_PAD_LINK_DELAYED;
/* close old session */
if (ffmpegdec->opened) {
avcodec_close (ffmpegdec->context);
ffmpegdec->opened = FALSE;
}
/* set defaults */
avcodec_get_context_defaults (ffmpegdec->context);
/* set buffer functions */
ffmpegdec->context->get_buffer = gst_ffmpegdec_get_buffer;
ffmpegdec->context->release_buffer = gst_ffmpegdec_release_buffer;
switch (oclass->in_plugin->type) {
case CODEC_TYPE_VIDEO:
/* get size */
if (gst_caps_has_property_typed (caps, "width", GST_PROPS_INT_TYPE))
gst_caps_get_int (caps, "width", &ffmpegdec->context->width);
if (gst_caps_has_property_typed (caps, "height", GST_PROPS_INT_TYPE))
gst_caps_get_int (caps, "height", &ffmpegdec->context->height);
break;
case CODEC_TYPE_AUDIO:
/* FIXME: does ffmpeg want us to set the sample format
* and the rate+channels here? Or does it provide them
* itself? */
break;
default:
/* Unsupported */
return GST_PAD_LINK_REFUSED;
}
/* we dont send complete frames */
if (oclass->in_plugin->capabilities & CODEC_CAP_TRUNCATED)
ffmpegdec->context->flags |= CODEC_FLAG_TRUNCATED;
/* open codec - we don't select an output pix_fmt yet,
* simply because we don't know! We only get it
* during playback... */
if (avcodec_open (ffmpegdec->context, oclass->in_plugin) < 0) {
GST_DEBUG (GST_CAT_PLUGIN_INFO,
"ffdec_%s: Failed to open FFMPEG codec",
oclass->in_plugin->name);
return GST_PAD_LINK_REFUSED;
}
/* done! */
ffmpegdec->opened = TRUE;
return GST_PAD_LINK_OK;
}
/* innocent hacks */
#define EDGE_WIDTH 16
#define ALIGN(x) (((x)+alignment)&~alignment)
static int
gst_ffmpegdec_get_buffer (AVCodecContext *context,
AVFrame *picture)
{
GstBuffer *buf = NULL;
gint hor_chr_dec = 0, ver_chr_dec = 0, bpp = 0;
gint width, height;
gint alignment;
gulong bufsize = 0;
void *base;
/* set alignment */
if (context->codec_id == CODEC_ID_SVQ1) {
alignment = 63;
} else {
alignment = 15;
}
/* set start size */
width = ALIGN (context->width);
height = ALIGN (context->height);
/* edge */
if (!(context->flags & CODEC_FLAG_EMU_EDGE)) {
width += EDGE_WIDTH * 2;
height += EDGE_WIDTH * 2;
}
switch (context->codec_type) {
case CODEC_TYPE_VIDEO:
switch (context->pix_fmt) {
case PIX_FMT_YUV420P:
bpp = 12;
hor_chr_dec = ver_chr_dec = 2;
break;
case PIX_FMT_YUV422:
bpp = 16;
break;
case PIX_FMT_YUV422P:
bpp = 16;
hor_chr_dec = 2; ver_chr_dec = 1;
break;
case PIX_FMT_RGB24:
case PIX_FMT_BGR24:
bpp = 24;
break;
case PIX_FMT_YUV444P:
bpp = 24;
hor_chr_dec = ver_chr_dec = 1;
break;
case PIX_FMT_RGBA32:
bpp = 32;
break;
case PIX_FMT_YUV410P:
bpp = 9;
hor_chr_dec = ver_chr_dec = 4;
break;
case PIX_FMT_YUV411P:
bpp = 12;
hor_chr_dec = 4; ver_chr_dec = 1;
break;
case PIX_FMT_RGB565:
case PIX_FMT_RGB555:
bpp = 16;
break;
default:
g_assert (0);
break;
}
bufsize = width * height * bpp / 8;
break;
case CODEC_TYPE_AUDIO:
bufsize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
break;
default:
g_assert (0);
break;
}
/* create buffer */
buf = gst_buffer_new_and_alloc (bufsize);
/* set up planes */
base = GST_BUFFER_DATA (buf);
if (hor_chr_dec > 0 && ver_chr_dec > 0) {
picture->linesize[0] = width;
picture->data[0] = base;
if (!(context->flags & CODEC_FLAG_EMU_EDGE)) {
picture->data[0] += (picture->linesize[0] * EDGE_WIDTH) + EDGE_WIDTH;
}
base += width * height;
picture->linesize[1] = picture->linesize[0] / hor_chr_dec;
picture->data[1] = base;
if (!(context->flags & CODEC_FLAG_EMU_EDGE)) {
picture->data[1] += (picture->linesize[1] * EDGE_WIDTH / ver_chr_dec) +
(EDGE_WIDTH / hor_chr_dec);
}
base += (width * height) / (ver_chr_dec * hor_chr_dec);
picture->linesize[2] = picture->linesize[1];
picture->data[2] = base;
if (!(context->flags & CODEC_FLAG_EMU_EDGE)) {
picture->data[2] += (picture->linesize[2] * EDGE_WIDTH / ver_chr_dec) +
(EDGE_WIDTH / hor_chr_dec);
}
} else {
picture->linesize[0] = GST_BUFFER_MAXSIZE (buf) / height;
picture->data[0] = base;
if (!(context->flags & CODEC_FLAG_EMU_EDGE)) {
picture->data[0] += (picture->linesize[0] * EDGE_WIDTH) + EDGE_WIDTH;
}
picture->linesize[1] = picture->linesize[2] = 0;
picture->data[1] = picture->data[2] = NULL;
}
picture->linesize[3] = 0;
picture->data[3] = NULL;
/* tell ffmpeg we own this buffer
*
* we also use an evil hack (keep buffer in base[0])
* to keep a reference to the buffer in release_buffer(),
* so that we can ref() it here and unref() it there
* so that we don't need to copy data */
picture->type = FF_BUFFER_TYPE_USER;
picture->age = G_MAXINT;
picture->base[0] = (int8_t *) buf;
gst_buffer_ref (buf);
return 0;
}
static void
gst_ffmpegdec_chain_video (GstPad *pad, GstBuffer *inbuf)
gst_ffmpegdec_release_buffer (AVCodecContext *context,
AVFrame *picture)
{
gint i;
GstBuffer *buf = GST_BUFFER (picture->base[0]);
gst_buffer_unref (buf);
/* zero out the reference in ffmpeg */
for (i=0;i<4;i++) {
picture->data[i] = NULL;
picture->linesize[i] = 0;
}
picture->base[0] = NULL;
}
static void
gst_ffmpegdec_chain (GstPad *pad,
GstBuffer *inbuf)
{
GstBuffer *outbuf;
GstFFMpegDec *ffmpegdec = (GstFFMpegDec *)(gst_pad_get_parent (pad));
GstFFMpegDecClass *oclass = (GstFFMpegDecClass*)(G_OBJECT_GET_CLASS (ffmpegdec));
guchar *data;
  gint size, len = 0;
  gint have_data;
/* FIXME: implement event awareness (especially EOS
* (av_close_codec ()) and FLUSH/DISCONT
* (avcodec_flush_buffers ()))
*/
data = GST_BUFFER_DATA (inbuf);
size = GST_BUFFER_SIZE (inbuf);
@@ -243,103 +406,87 @@ gst_ffmpegdec_chain_video (GstPad *pad, GstBuffer *inbuf)
do {
ffmpegdec->context->frame_number++;
    switch (oclass->in_plugin->type) {
      case CODEC_TYPE_VIDEO:
        len = avcodec_decode_video (ffmpegdec->context,
                                    ffmpegdec->picture,
                                    &have_data,
                                    data, size);
        break;
      case CODEC_TYPE_AUDIO:
        len = avcodec_decode_audio (ffmpegdec->context,
                                    (int16_t *) ffmpegdec->picture->data[0],
                                    &have_data,
                                    data, size);
        break;
      default:
        g_assert (0);
        break;
    }
if (len < 0) {
g_warning ("ffmpegdec: decoding error");
g_warning ("ffdec_%s: decoding error",
oclass->in_plugin->name);
break;
}
    if (have_data) {
      if (!GST_PAD_CAPS (ffmpegdec->srcpad)) {
        GstCaps *caps;

        if (!(ffmpegdec->context->flags & CODEC_FLAG_EMU_EDGE)) {
          ffmpegdec->context->width += EDGE_WIDTH * 2;
          ffmpegdec->context->height += EDGE_WIDTH * 2;
        }
        caps = gst_ffmpeg_codectype_to_caps (oclass->in_plugin->type,
                                             ffmpegdec->context);
        if (!(ffmpegdec->context->flags & CODEC_FLAG_EMU_EDGE)) {
          ffmpegdec->context->width -= EDGE_WIDTH * 2;
          ffmpegdec->context->height -= EDGE_WIDTH * 2;
        }
        if (caps == NULL ||
            gst_pad_try_set_caps (ffmpegdec->srcpad, caps) <= 0) {
          gst_element_error (GST_ELEMENT (ffmpegdec),
                             "Failed to link ffmpeg decoder (%s) to next element",
                             oclass->in_plugin->name);
          return;
        }
}
      outbuf = GST_BUFFER (ffmpegdec->picture->base[0]);
      GST_BUFFER_TIMESTAMP (outbuf) = GST_BUFFER_TIMESTAMP (inbuf);

      if (oclass->in_plugin->type == CODEC_TYPE_AUDIO)
        GST_BUFFER_SIZE (outbuf) = have_data;
      else
        GST_BUFFER_SIZE (outbuf) = GST_BUFFER_MAXSIZE (outbuf);
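      /* outbuf is the buffer get_buffer() handed to ffmpeg, so the
       * decoded data is already in place and no copy is needed */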
gst_pad_push (ffmpegdec->srcpad, outbuf);
}
size -= len;
data += len;
  } while (size > 0);
gst_buffer_unref (inbuf);
}
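/* going PAUSED->READY closes the avcodec context again; it gets
 * (re)opened by the pad link handler */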
static GstElementStateReturn
gst_ffmpegdec_change_state (GstElement *element)
{
  GstFFMpegDec *ffmpegdec = (GstFFMpegDec *) element;
  gint transition = GST_STATE_TRANSITION (element);

  switch (transition) {
    case GST_STATE_PAUSED_TO_READY:
      if (ffmpegdec->opened) {
        avcodec_close (ffmpegdec->context);
        ffmpegdec->opened = FALSE;
      }
      break;
  }

  if (GST_ELEMENT_CLASS (parent_class)->change_state)
    return GST_ELEMENT_CLASS (parent_class)->change_state (element);

  return GST_STATE_SUCCESS;
}
gboolean
@@ -368,8 +515,8 @@ gst_ffmpegdec_register (GstPlugin *plugin)
while (in_plugin) {
gchar *type_name;
gchar *codec_type;
    GstPadTemplate *sinktempl, *srctempl;
    GstCaps *sinkcaps, *srccaps;
GstFFMpegClassParams *params;
if (in_plugin->decode) {
@@ -378,6 +525,13 @@ gst_ffmpegdec_register (GstPlugin *plugin)
else {
goto next;
}
/* first make sure we've got a supported type */
sinkcaps = gst_ffmpeg_codecid_to_caps (in_plugin->id, NULL);
srccaps = gst_ffmpeg_codectype_to_caps (in_plugin->type, NULL);
if (!sinkcaps || !srccaps)
goto next;
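    /* codecs the codec map cannot describe (yet) are simply skipped
     * instead of being registered as elements */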
/* construct the type */
type_name = g_strdup_printf("ff%s_%s", codec_type, in_plugin->name);
@@ -391,42 +545,43 @@ gst_ffmpegdec_register (GstPlugin *plugin)
type = g_type_register_static(GST_TYPE_ELEMENT, type_name , &typeinfo, 0);
/* construct the element details struct */
    details = g_new0 (GstElementDetails, 1);
    details->longname = g_strdup (in_plugin->name);
    details->klass = g_strdup_printf ("Codec/%s/%s",
                                      (in_plugin->type == CODEC_TYPE_VIDEO) ?
                                      "Video" : "Audio",
                                      type_name);
    details->license = g_strdup ("LGPL");
    details->description = g_strdup (in_plugin->name);
    details->version = g_strdup (VERSION);
    details->author = g_strdup ("The FFMPEG crew\n"
                                "Wim Taymans <wim.taymans@chello.be>\n"
                                "Ronald Bultje <rbultje@ronald.bitfreak.net>");
    details->copyright = g_strdup ("(c) 2001-2003");
/* register the plugin with gstreamer */
factory = gst_element_factory_new(type_name,type,details);
g_return_val_if_fail(factory != NULL, FALSE);
    gst_element_factory_set_rank (factory, GST_ELEMENT_RANK_MARGINAL);
    sinktempl = gst_pad_template_new ("sink", GST_PAD_SINK,
                                      GST_PAD_ALWAYS, sinkcaps, NULL);
gst_element_factory_add_pad_template (factory, sinktempl);
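    /* the src template is likewise generated from the codec map, so the
     * raw caps we announce stay in sync with what the decoder outputs */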
srctempl = gst_pad_template_new ("src", GST_PAD_SRC,
GST_PAD_ALWAYS, srccaps, NULL);
gst_element_factory_add_pad_template (factory, srctempl);
params = g_new0 (GstFFMpegClassParams, 1);
params->in_plugin = in_plugin;
    params->sinktempl = sinktempl;
    params->srctempl = srctempl;

    g_hash_table_insert (global_plugins,
                         GINT_TO_POINTER (type),
                         (gpointer) params);
/* The very last thing is to register the elementfactory with the plugin. */
gst_plugin_add_feature (plugin, GST_PLUGIN_FEATURE (factory));


@@ -17,9 +17,11 @@
* Boston, MA 02111-1307, USA.
*/
#include "config.h"

#include <assert.h>
#include <string.h>
#ifdef HAVE_FFMPEG_UNINSTALLED
#include <avcodec.h>
#else
@@ -28,6 +30,8 @@
#include <gst/gst.h>
#include "gstffmpegcodecmap.h"
typedef struct _GstFFMpegEnc GstFFMpegEnc;
struct _GstFFMpegEnc {
@@ -87,6 +91,8 @@ enum {
/* FILL ME */
};
int motion_estimation_method;
/* This factory is much simpler, and defines the source pad. */
GST_PAD_TEMPLATE_FACTORY (gst_ffmpegenc_src_factory,
"src",