gstreamer/gst/mpegvideoparse/mpegvideoparse.c
Jan Schmidt 1fe4050c0a gst/: Fix compiler warnings shown by Forte.
Original commit message from CVS:
* gst/librfb/rfbbuffer.c: (rfb_buffer_new_and_alloc):
* gst/librfb/rfbbuffer.h:
* gst/librfb/rfbdecoder.c: (rfb_socket_get_buffer):
* gst/mpegvideoparse/mpegvideoparse.c: (gst_mpegvideoparse_chain):
* gst/nsf/nes6502.c: (nes6502_execute):
* gst/real/gstrealaudiodec.c: (gst_real_audio_dec_setcaps):
* gst/real/gstrealvideodec.c: (open_library):
* gst/real/gstrealvideodec.h:
* gst/rtpmanager/gstrtpsession.c: (create_recv_rtp_sink),
(create_recv_rtcp_sink), (create_send_rtp_sink):
Fix compiler warnings shown by Forte.
2007-10-08 17:46:45 +00:00

561 lines
18 KiB
C

/* GStreamer
* Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
* Copyright (C) <2007> Jan Schmidt <thaytan@mad.scientist.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <string.h>
#include "mpegvideoparse.h"
/* FIXME: there are still some things to do in this element.
* + Handle Sequence Display Extension to output the display size
* rather than the encoded size.
* + Do all the other stuff (documentation, tests) to get it into
* ugly or good.
* + low priority:
* - handle seeking in raw elementary streams
* - calculate timestamps for all un-timestamped frames, taking into
* account frame re-ordering. Doing this probably requires introducing
* an extra end-to-end delay however, so might not be really desirable.
* - Collect a list of regions and the sequence headers that apply
* to each region so that we properly handle SEQUENCE_END followed
* by a new sequence. At the moment, the caps will change if the
* sequence changes, but if we then seek to a different spot it might
* be wrong. Fortunately almost every stream only has 1 sequence.
*/
/* Debug category used by all GST_DEBUG/GST_LOG calls in this element */
GST_DEBUG_CATEGORY_STATIC (mpv_parse_debug);
#define GST_CAT_DEFAULT mpv_parse_debug
/* elementfactory information */
static GstElementDetails mpegvideoparse_details =
GST_ELEMENT_DETAILS ("MPEG video elementary stream parser",
    "Codec/Parser/Video",
    "Parses and frames MPEG-1 and MPEG-2 elementary video streams",
    "Wim Taymans <wim.taymans@chello.be>\n"
    "Jan Schmidt <thaytan@mad.scientist.com>");
/* Source pad: fully parsed/framed MPEG-1/2 video with size, framerate and
 * pixel-aspect-ratio filled in from the sequence header */
static GstStaticPadTemplate src_template =
GST_STATIC_PAD_TEMPLATE ("src", GST_PAD_SRC,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS ("video/mpeg, "
        "mpegversion = (int) [ 1, 2 ], "
        "parsed = (boolean) true, "
        "systemstream = (boolean) false, "
        "width = (int) [ 16, 4096 ], "
        "height = (int) [ 16, 4096 ], "
        "pixel-aspect-ratio = (fraction) [ 0/1, MAX ], "
        "framerate = (fraction) [ 0/1, MAX ]")
    );
/* Sink pad: unparsed MPEG-1/2 elementary video stream */
static GstStaticPadTemplate sink_template = GST_STATIC_PAD_TEMPLATE ("sink",
    GST_PAD_SINK,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS ("video/mpeg, "
        "mpegversion = (int) [ 1, 2 ], "
        "parsed = (boolean) false, " "systemstream = (boolean) false")
    );
/* MpegVideoParse signals and args */
enum
{
  /* FILL ME */
  LAST_SIGNAL
};
enum
{
  ARG_0
      /* FILL ME */
};
/* Forward declarations for the GType/vfunc plumbing below */
static void gst_mpegvideoparse_class_init (MpegVideoParseClass * klass);
static void gst_mpegvideoparse_base_init (MpegVideoParseClass * klass);
static void gst_mpegvideoparse_init (MpegVideoParse * mpegvideoparse);
static void gst_mpegvideoparse_dispose (GObject * object);
static GstFlowReturn gst_mpegvideoparse_chain (GstPad * pad, GstBuffer * buf);
static gboolean mpv_parse_sink_event (GstPad * pad, GstEvent * event);
static void gst_mpegvideoparse_flush (MpegVideoParse * mpegvideoparse);
static GstStateChangeReturn
gst_mpegvideoparse_change_state (GstElement * element,
    GstStateChange transition);
static GstElementClass *parent_class = NULL;
/*static guint gst_mpegvideoparse_signals[LAST_SIGNAL] = { 0 }; */
/* Standard GObject boilerplate: register and cache the MpegVideoParse
 * GType on first use. */
GType
mpegvideoparse_get_type (void)
{
  static GType type = 0;

  if (type == 0) {
    static const GTypeInfo info = {
      sizeof (MpegVideoParseClass),
      (GBaseInitFunc) gst_mpegvideoparse_base_init,
      NULL,
      (GClassInitFunc) gst_mpegvideoparse_class_init,
      NULL,
      NULL,
      sizeof (MpegVideoParse),
      0,
      (GInstanceInitFunc) gst_mpegvideoparse_init,
    };

    type = g_type_register_static (GST_TYPE_ELEMENT, "MpegVideoParse",
        &info, 0);
  }

  return type;
}
/* base_init: install the element details and both pad templates on the
 * element class so the factory can advertise them. */
static void
gst_mpegvideoparse_base_init (MpegVideoParseClass * klass)
{
  GstElementClass *eclass = GST_ELEMENT_CLASS (klass);

  gst_element_class_set_details (eclass, &mpegvideoparse_details);
  gst_element_class_add_pad_template (eclass,
      gst_static_pad_template_get (&sink_template));
  gst_element_class_add_pad_template (eclass,
      gst_static_pad_template_get (&src_template));
}
/* class_init: hook up the dispose and state-change virtual methods.
 * Note: gst_mpegvideoparse_dispose already has the exact signature of
 * GObjectClass::dispose (void (*)(GObject *)), so the previous
 * (GObjectFinalizeFunc) cast was unnecessary and misleading (it named the
 * wrong vfunc type); assign directly and let the compiler type-check. */
static void
gst_mpegvideoparse_class_init (MpegVideoParseClass * klass)
{
  GObjectClass *gobject_class;
  GstElementClass *gstelement_class;

  gstelement_class = (GstElementClass *) klass;
  gobject_class = G_OBJECT_CLASS (klass);

  parent_class = g_type_class_peek_parent (klass);

  gobject_class->dispose = gst_mpegvideoparse_dispose;
  gstelement_class->change_state = gst_mpegvideoparse_change_state;
}
/* Reset the stored sequence header to "no sequence seen yet":
 * version 0 gates all output in mpegvideoparse_drain_avail(), and the
 * fraction fields get harmless 0/1 defaults. */
static void
mpv_parse_reset (MpegVideoParse * mpegvideoparse)
{
  MPEGSeqHdr *hdr = &mpegvideoparse->seq_hdr;

  hdr->mpeg_version = 0;
  hdr->width = -1;
  hdr->height = -1;
  hdr->fps_n = 0;
  hdr->fps_d = 1;
  hdr->par_w = 0;
  hdr->par_h = 1;
}
/* Instance init: create sink and src pads from the static templates,
 * wire up chain/event handlers on the sink, and reset parser state. */
static void
gst_mpegvideoparse_init (MpegVideoParse * mpegvideoparse)
{
  GstElement *element = GST_ELEMENT (mpegvideoparse);
  GstPad *pad;

  /* Sink pad receives the raw elementary stream */
  pad = gst_pad_new_from_static_template (&sink_template, "sink");
  gst_pad_set_chain_function (pad, gst_mpegvideoparse_chain);
  gst_pad_set_event_function (pad, mpv_parse_sink_event);
  mpegvideoparse->sinkpad = pad;
  gst_element_add_pad (element, pad);

  /* Src pad pushes parsed frames; caps are set once per sequence header */
  pad = gst_pad_new_from_static_template (&src_template, "src");
  gst_pad_use_fixed_caps (pad);
  mpegvideoparse->srcpad = pad;
  gst_element_add_pad (element, pad);

  mpeg_packetiser_init (&mpegvideoparse->packer);
  mpv_parse_reset (mpegvideoparse);
}
/* dispose: release the packetiser and any cached sequence-header buffer,
 * then chain up to the parent class.
 *
 * Fix: the forward declaration (above) is 'static', but this definition
 * omitted the keyword. That is legal C (internal linkage sticks), but it
 * is inconsistent and triggers warnings on some compilers; make the
 * definition match the declaration.
 *
 * NOTE(review): dispose may run more than once per object; this assumes
 * mpeg_packetiser_free() tolerates that — confirm against packetiser code. */
static void
gst_mpegvideoparse_dispose (GObject * object)
{
  MpegVideoParse *mpegvideoparse = GST_MPEGVIDEOPARSE (object);

  mpeg_packetiser_free (&mpegvideoparse->packer);
  gst_buffer_replace (&mpegvideoparse->seq_hdr_buf, NULL);

  G_OBJECT_CLASS (parent_class)->dispose (object);
}
/* Parse a sequence header out of @buf. If it differs from the stored one,
 * renegotiate src caps (size, framerate, PAR, codec_data) and remember it.
 * Returns FALSE if the header is unparseable or caps negotiation fails.
 *
 * Fix: @caps was leaked on every sequence change — in GStreamer 0.10
 * gst_pad_set_caps() does NOT take ownership of the caps, so the caller
 * must unref them. */
static gboolean
mpegvideoparse_handle_sequence (MpegVideoParse * mpegvideoparse,
    GstBuffer * buf)
{
  MPEGSeqHdr new_hdr;
  guint8 *cur, *end;

  cur = GST_BUFFER_DATA (buf);
  end = GST_BUFFER_DATA (buf) + GST_BUFFER_SIZE (buf);

  memset (&new_hdr, 0, sizeof (MPEGSeqHdr));

  if (G_UNLIKELY (!mpeg_util_parse_sequence_hdr (&new_hdr, cur, end)))
    return FALSE;

  if (memcmp (&mpegvideoparse->seq_hdr, &new_hdr, sizeof (MPEGSeqHdr)) != 0) {
    GstCaps *caps;
    GstBuffer *seq_buf;
    gboolean caps_ok;

    /* Store the entire sequence header + sequence header extension for
     * output as codec_data. gst_buffer_replace() takes its own ref, so we
     * drop ours; seq_buf stays alive via seq_hdr_buf for the caps below. */
    seq_buf = gst_buffer_copy (buf);
    gst_buffer_replace (&mpegvideoparse->seq_hdr_buf, seq_buf);
    gst_buffer_unref (seq_buf);

    caps = gst_caps_new_simple ("video/mpeg",
        "systemstream", G_TYPE_BOOLEAN, FALSE,
        "parsed", G_TYPE_BOOLEAN, TRUE,
        "mpegversion", G_TYPE_INT, new_hdr.mpeg_version,
        "width", G_TYPE_INT, new_hdr.width,
        "height", G_TYPE_INT, new_hdr.height,
        "framerate", GST_TYPE_FRACTION, new_hdr.fps_n, new_hdr.fps_d,
        "pixel-aspect-ratio", GST_TYPE_FRACTION, new_hdr.par_w, new_hdr.par_h,
        "codec_data", GST_TYPE_BUFFER, seq_buf, NULL);

    GST_DEBUG ("New mpegvideoparse caps: %" GST_PTR_FORMAT, caps);

    caps_ok = gst_pad_set_caps (mpegvideoparse->srcpad, caps);
    /* gst_pad_set_caps() refs the caps itself; release ours to avoid the
     * leak the old code had on both the success and failure paths. */
    gst_caps_unref (caps);
    if (!caps_ok)
      return FALSE;

    /* And update the new_hdr into our stored version */
    mpegvideoparse->seq_hdr = new_hdr;
  }

  return TRUE;
}
/* Walk all start codes in @buf and validate any PICTURE headers found.
 * Returns FALSE as soon as one picture header fails to parse (corrupt
 * data), TRUE otherwise. Currently only logs the picture type; see the
 * FIXME below about timestamp tracking. */
static gboolean
mpegvideoparse_handle_picture (MpegVideoParse * mpegvideoparse, GstBuffer * buf)
{
  guint8 *cur, *end;
  /* Rolling sync word, seeded with all-ones so the first real start code
   * is detected; presumably updated by mpeg_util_find_start_code() as it
   * scans — TODO confirm against mpegpacketiser */
  guint32 sync_word = 0xffffffff;

  cur = GST_BUFFER_DATA (buf);
  end = GST_BUFFER_DATA (buf) + GST_BUFFER_SIZE (buf);

  cur = mpeg_util_find_start_code (&sync_word, cur, end);
  while (cur != NULL) {
    /* Cur points at the last byte of the start code */
    if (cur[0] == MPEG_PACKET_PICTURE) {
      /* Back up over the 3 preceding sync-word bytes (00 00 01) so the
       * header parser sees the full start code */
      guint8 *pic_data = cur - 3;
      MPEGPictureHdr hdr;

      /* pic_data points to the first byte of the sync word now */
      if (!mpeg_util_parse_picture_hdr (&hdr, pic_data, end))
        return FALSE;

      GST_LOG_OBJECT (mpegvideoparse, "Picture type is %u", hdr.pic_type);
      /* FIXME: Can use the picture type and number of fields to track a
       * timestamp */
    }
    cur = mpeg_util_find_start_code (&sync_word, cur, end);
  }
  return TRUE;
}
#if 0
/* Currently unused: decode the 25-bit GOP time code (hh:mm:ss + frame
 * count) from the 4 bytes at @gop into a GstClockTime, using the sequence
 * header's frame rate to convert the frame count to time. Kept for the
 * planned timestamp-tracking work (see FIXME at top of file). */
static guint64
gst_mpegvideoparse_time_code (guchar * gop, MPEGSeqHdr * seq_hdr)
{
  guint32 data = GST_READ_UINT32_BE (gop);
  guint64 seconds;
  guint8 frames;

  seconds = ((data & 0xfc000000) >> 26) * 3600;  /* hours */
  seconds += ((data & 0x03f00000) >> 20) * 60;   /* minutes */
  seconds += (data & 0x0007e000) >> 13;          /* seconds */
  frames = (data & 0x00001f80) >> 7;

  return seconds * GST_SECOND + gst_util_uint64_scale_int (frames * GST_SECOND,
      seq_hdr->fps_d, seq_hdr->fps_n);
}
#endif
/* Reset stream-position tracking; called on FLUSH_STOP and EOS so the
 * next buffer's offset is not treated as a discontinuity. */
static void
gst_mpegvideoparse_flush (MpegVideoParse * mpegvideoparse)
{
  GST_DEBUG_OBJECT (mpegvideoparse, "mpegvideoparse: flushing");
  mpegvideoparse->next_offset = GST_BUFFER_OFFSET_NONE;
}
/* Pull every complete block the packetiser currently has and push it
 * downstream. Buffers are dropped until the first sequence header has
 * been seen (and src caps set); corrupt picture headers are also dropped
 * and force a DISCONT on the next pushed buffer. Stops early if a push
 * returns anything other than GST_FLOW_OK.
 *
 * Fix: removed the stray ';' after the while-loop's closing brace — an
 * empty statement that draws warnings from stricter compilers (the kind
 * of thing the Forte warning cleanup targets). No behavior change. */
static GstFlowReturn
mpegvideoparse_drain_avail (MpegVideoParse * mpegvideoparse)
{
  MPEGBlockInfo *cur;
  GstBuffer *buf = NULL;
  GstFlowReturn res = GST_FLOW_OK;

  cur = mpeg_packetiser_get_block (&mpegvideoparse->packer, &buf);
  while ((cur != NULL) && (res == GST_FLOW_OK)) {
    /* Handle the block */
    GST_LOG_OBJECT (mpegvideoparse,
        "Have block of size %u with pack_type 0x%02x and flags 0x%02x\n",
        cur->length, cur->first_pack_type, cur->flags);

    /* Don't start pushing out buffers until we've seen a sequence header */
    if (mpegvideoparse->seq_hdr.mpeg_version == 0) {
      if (cur->flags & MPEG_BLOCK_FLAG_SEQUENCE) {
        /* Found a sequence header */
        if (!mpegvideoparse_handle_sequence (mpegvideoparse, buf)) {
          GST_DEBUG_OBJECT (mpegvideoparse,
              "Invalid sequence header. Dropping buffer.");
          gst_buffer_unref (buf);
          buf = NULL;
        }
      } else {
        if (buf) {
          GST_DEBUG_OBJECT (mpegvideoparse,
              "No sequence header yet. Dropping buffer of %u bytes",
              GST_BUFFER_SIZE (buf));
          gst_buffer_unref (buf);
          buf = NULL;
        }
      }
    }

    if (buf != NULL) {
      /* If outputting a PICTURE packet, we can calculate the duration
         and possibly the timestamp */
      if (cur->flags & MPEG_BLOCK_FLAG_PICTURE) {
        if (!mpegvideoparse_handle_picture (mpegvideoparse, buf)) {
          /* Corrupted picture. Drop it. */
          GST_DEBUG_OBJECT (mpegvideoparse,
              "Corrupted picture header. Dropping buffer of %u bytes",
              GST_BUFFER_SIZE (buf));
          mpegvideoparse->need_discont = TRUE;
          gst_buffer_unref (buf);
          buf = NULL;
        }
      }
    }

    if (buf != NULL) {
      GST_DEBUG_OBJECT (mpegvideoparse,
          "mpegvideoparse: pushing buffer of %u bytes with ts %"
          GST_TIME_FORMAT, GST_BUFFER_SIZE (buf),
          GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)));
      gst_buffer_set_caps (buf, GST_PAD_CAPS (mpegvideoparse->srcpad));
      if (mpegvideoparse->need_discont) {
        GST_DEBUG_OBJECT (mpegvideoparse,
            "setting discont flag on outgoing buffer");
        GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
        mpegvideoparse->need_discont = FALSE;
      }
      res = gst_pad_push (mpegvideoparse->srcpad, buf);
    }

    /* Advance to the next data block */
    mpeg_packetiser_next_block (&mpegvideoparse->packer);
    cur = mpeg_packetiser_get_block (&mpegvideoparse->packer, &buf);
  }

  return res;
}
/* Chain function: detect discontinuities (DISCONT flag or a byte-offset
 * jump), drain pending data when one occurs, then hand the buffer to the
 * packetiser (which takes ownership) and push whatever completes.
 *
 * Fix: GST_BUFFER_OFFSET() is a guint64, so the debug format must use
 * G_GUINT64_FORMAT, not G_GINT64_FORMAT — a signed/unsigned format
 * mismatch (exactly the class of warning Forte reports). */
static GstFlowReturn
gst_mpegvideoparse_chain (GstPad * pad, GstBuffer * buf)
{
  MpegVideoParse *mpegvideoparse;
  GstFlowReturn res;
  gboolean have_discont;
  guint64 next_offset = GST_BUFFER_OFFSET_NONE;

  g_return_val_if_fail (pad != NULL, GST_FLOW_ERROR);
  g_return_val_if_fail (buf != NULL, GST_FLOW_ERROR);

  mpegvideoparse =
      GST_MPEGVIDEOPARSE (gst_object_get_parent (GST_OBJECT (pad)));

  GST_DEBUG_OBJECT (mpegvideoparse,
      "mpegvideoparse: received buffer of %u bytes with ts %"
      GST_TIME_FORMAT " and offset %" G_GUINT64_FORMAT, GST_BUFFER_SIZE (buf),
      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), GST_BUFFER_OFFSET (buf));

  /* If we have an offset, and the incoming offset doesn't match,
     or we have a discont, handle it first by flushing out data
     we have collected. */
  have_discont = GST_BUFFER_IS_DISCONT (buf);

  /* NOTE(review): next_offset is only advanced once it is already valid;
   * tracking never starts from an incoming buffer's offset alone —
   * confirm whether that is intentional. */
  if (mpegvideoparse->next_offset != GST_BUFFER_OFFSET_NONE) {
    if (GST_BUFFER_OFFSET_IS_VALID (buf)) {
      if (mpegvideoparse->next_offset != GST_BUFFER_OFFSET (buf))
        have_discont = TRUE;
      next_offset = GST_BUFFER_OFFSET (buf) + GST_BUFFER_SIZE (buf);
    } else {
      next_offset = mpegvideoparse->next_offset + GST_BUFFER_SIZE (buf);
    }
  }

  /* Clear out any existing stuff if the new buffer is discontinuous */
  if (have_discont) {
    GST_DEBUG_OBJECT (mpegvideoparse, "Have discont packet, draining data");
    mpegvideoparse->need_discont = TRUE;

    mpeg_packetiser_handle_eos (&mpegvideoparse->packer);
    res = mpegvideoparse_drain_avail (mpegvideoparse);
    mpeg_packetiser_flush (&mpegvideoparse->packer);
    if (res != GST_FLOW_OK) {
      mpegvideoparse->next_offset = next_offset;
      gst_buffer_unref (buf);
      return res;
    }
  }

  /* Takes ownership of the data */
  mpeg_packetiser_add_buf (&mpegvideoparse->packer, buf);

  /* And push out what we can */
  res = mpegvideoparse_drain_avail (mpegvideoparse);

  /* Update our offset */
  mpegvideoparse->next_offset = next_offset;

  gst_object_unref (mpegvideoparse);

  return res;
}
/* Sink pad event handler. NEWSEGMENT events in non-TIME formats are
 * replaced with an open-ended TIME segment (no byte→time conversion is
 * available yet for elementary streams); FLUSH_STOP resets offset
 * tracking; EOS drains the packetiser before forwarding. */
static gboolean
mpv_parse_sink_event (GstPad * pad, GstEvent * event)
{
  gboolean res = TRUE;
  MpegVideoParse *mpegvideoparse =
      GST_MPEGVIDEOPARSE (gst_pad_get_parent (pad));

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_NEWSEGMENT:
    {
      gdouble rate, applied_rate;
      GstFormat format;
      gint64 start, stop, pos;
      gboolean update;

      gst_event_parse_new_segment_full (event, &update, &rate, &applied_rate,
          &format, &start, &stop, &pos);

      if (format == GST_FORMAT_BYTES) {
        /* FIXME: Later, we might use a seek table to seek on elementary stream
           files, and that would allow byte-to-time conversions. It's not a high
           priority - most mpeg video is muxed and then the demuxer handles
           seeking. In the meantime, here's some commented out logic copied
           from mp3parse */
#if 0
        GstClockTime seg_start, seg_stop, seg_pos;

        /* stop time is allowed to be open-ended, but not start & pos */
        if (!mp3parse_bytepos_to_time (mp3parse, stop, &seg_stop))
          seg_stop = GST_CLOCK_TIME_NONE;
        if (mp3parse_bytepos_to_time (mp3parse, start, &seg_start) &&
            mp3parse_bytepos_to_time (mp3parse, pos, &seg_pos)) {
          gst_event_unref (event);
          event = gst_event_new_new_segment_full (update, rate, applied_rate,
              GST_FORMAT_TIME, seg_start, seg_stop, seg_pos);
          format = GST_FORMAT_TIME;
          GST_DEBUG_OBJECT (mp3parse, "Converted incoming segment to TIME. "
              "start = %" G_GINT64_FORMAT ", stop = %" G_GINT64_FORMAT
              "pos = %" G_GINT64_FORMAT, seg_start, seg_stop, seg_pos);
        }
#endif
      }

      if (format != GST_FORMAT_TIME) {
        /* Unknown incoming segment format. Output a default open-ended
         * TIME segment */
        gst_event_unref (event);
        event = gst_event_new_new_segment_full (update, rate, applied_rate,
            GST_FORMAT_TIME, 0, GST_CLOCK_TIME_NONE, 0);
      }

      /* Re-parse (the possibly replaced) event purely for the debug output */
      gst_event_parse_new_segment_full (event, &update, &rate, &applied_rate,
          &format, &start, &stop, &pos);

      GST_DEBUG_OBJECT (mpegvideoparse,
          "Pushing newseg rate %g, applied rate %g, format %d, start %"
          G_GINT64_FORMAT ", stop %" G_GINT64_FORMAT ", pos %" G_GINT64_FORMAT,
          rate, applied_rate, format, start, stop, pos);

      res = gst_pad_event_default (pad, event);
      break;
    }
    case GST_EVENT_FLUSH_STOP:
      gst_mpegvideoparse_flush (mpegvideoparse);
      res = gst_pad_event_default (pad, event);
      break;
    case GST_EVENT_EOS:
      /* Push any remaining buffers out, then flush. */
      mpeg_packetiser_handle_eos (&mpegvideoparse->packer);
      mpegvideoparse_drain_avail (mpegvideoparse);
      gst_mpegvideoparse_flush (mpegvideoparse);
      res = gst_pad_event_default (pad, event);
      break;
    default:
      res = gst_pad_event_default (pad, event);
      break;
  }

  gst_object_unref (mpegvideoparse);
  return res;
}
/* State-change vfunc: chain up first, then reset the cached sequence
 * header when leaving PAUSED for READY so a restarted stream renegotiates
 * caps from scratch. */
static GstStateChangeReturn
gst_mpegvideoparse_change_state (GstElement * element,
    GstStateChange transition)
{
  MpegVideoParse *parse;
  GstStateChangeReturn result;

  g_return_val_if_fail (GST_IS_MPEGVIDEOPARSE (element),
      GST_STATE_CHANGE_FAILURE);
  parse = GST_MPEGVIDEOPARSE (element);

  result = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);

  if (transition == GST_STATE_CHANGE_PAUSED_TO_READY)
    mpv_parse_reset (parse);

  return result;
}
/* Plugin entry point: set up the debug category and register the element.
 * Rank is just below SECONDARY so other parsers can outrank it. */
static gboolean
plugin_init (GstPlugin * plugin)
{
  GST_DEBUG_CATEGORY_INIT (mpv_parse_debug, "mpegvideoparse", 0,
      "MPEG Video Parser");

  if (!gst_element_register (plugin, "mpegvideoparse",
          GST_RANK_SECONDARY - 1, GST_TYPE_MPEGVIDEOPARSE))
    return FALSE;

  return TRUE;
}
/* Standard GStreamer plugin descriptor; VERSION/GST_LICENSE etc. come
 * from config.h via the build system. */
GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
    GST_VERSION_MINOR,
    "mpegvideoparse",
    "MPEG-1 and MPEG-2 video parser",
    plugin_init, VERSION, GST_LICENSE, GST_PACKAGE_NAME, GST_PACKAGE_ORIGIN)