Mirror of https://gitlab.freedesktop.org/gstreamer/gstreamer.git
gst-libs/gst/riff/riff-read.c: Additional pad usability check.
Original commit message from CVS:

2004-01-25  Ronald Bultje <rbultje@ronald.bitfreak.net>

* gst-libs/gst/riff/riff-read.c: (gst_riff_read_info):
  Additional pad usability check.

* gst/mpeg1videoparse/gstmp1videoparse.c: (gst_mp1videoparse_init), (mp1videoparse_find_next_gop), (gst_mp1videoparse_time_code), (gst_mp1videoparse_real_chain):
  Fix MPEG video stream parsing. The original plugin had several issues, including not timestamping streams where the source was not timestamped (this happens with PTS values in MPEG system streams, but MPEG video is also a valid stream on its own, so it needs timestamps too). We use the display time code for that for now. Also, if one incoming buffer contains multiple valid frames, we now push them all on correctly, including proper EOS handling. Lastly, several potential segfaults were fixed, and we properly sync on new sequence/GOP headers so that they end up in the next frame, not the previous one (since they are headers for the next frame, not the previous). Also see #119206.

* gst/mpegaudioparse/gstmpegaudioparse.c: (gst_mp3parse_chain), (bpf_from_header):
  Move caps setting so we only do it after finding several valid MPEG-1 frames in sequence, not right after the first one (which might be coincidental).

* gst/typefind/gsttypefindfunctions.c: (mpeg1_sys_type_find), (mpeg_video_type_find), (mpeg_video_stream_type_find), (plugin_init):
  Add unsynced MPEG video stream typefinding, and change some probability values so we detect streams correctly. The idea is as follows: I can have an unsynced system stream which contains video. In the current code, I would randomly get a type for either the system stream or the video stream, because the probabilities are calculated rather arbitrarily. I now use fixed values, so we always prefer the system stream if one was found (and that is how it should be). If no system stream was found, we can still identify the stream as video-only.
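The typefind part of the message above has no corresponding diff on this page (the four files changed here are ChangeLog, riff-read.c, gstmp1videoparse.c and gstmpegaudioparse.c), so here is a minimal sketch of the fixed-probability idea only. It is not the actual gsttypefindfunctions.c code: the function names are illustrative, the start-code checks are reduced to the first four bytes, and the exact probability constants are an assumption. The point is that a system-stream match is always reported at a higher, fixed probability than a bare video elementary stream, so whenever both match, the system stream wins.

#include <gst/gst.h>

/* Illustrative only: fixed probabilities mean an MPEG system stream,
 * when detected, always outranks a bare MPEG video elementary stream. */
static void
mpeg1_sys_type_find_sketch (GstTypeFind *tf, gpointer unused)
{
  const guint8 *data = gst_type_find_peek (tf, 0, 4);

  /* 0x000001ba: MPEG program/system stream pack start code */
  if (data && data[0] == 0x00 && data[1] == 0x00 &&
      data[2] == 0x01 && data[3] == 0xba) {
    GstCaps *caps = gst_caps_new_simple ("video/mpeg",
        "systemstream", G_TYPE_BOOLEAN, TRUE, NULL);

    gst_type_find_suggest (tf, GST_TYPE_FIND_MAXIMUM, caps);
  }
}

static void
mpeg_video_type_find_sketch (GstTypeFind *tf, gpointer unused)
{
  const guint8 *data = gst_type_find_peek (tf, 0, 4);

  /* 0x000001b3: MPEG video sequence header, i.e. a video-only stream.
   * Deliberately suggested below MAXIMUM so a system-stream match wins. */
  if (data && data[0] == 0x00 && data[1] == 0x00 &&
      data[2] == 0x01 && data[3] == 0xb3) {
    GstCaps *caps = gst_caps_new_simple ("video/mpeg",
        "systemstream", G_TYPE_BOOLEAN, FALSE, NULL);

    gst_type_find_suggest (tf, GST_TYPE_FIND_LIKELY, caps);
  }
}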
This commit is contained in:
parent  c5740934ac
commit  858534caa0

4 changed files with 123 additions and 27 deletions
ChangeLog  (+35)

@@ -1,3 +1,38 @@
+2004-01-25  Ronald Bultje <rbultje@ronald.bitfreak.net>
+
+        * gst-libs/gst/riff/riff-read.c: (gst_riff_read_info):
+          Additional pad usability check.
+        * gst/mpeg1videoparse/gstmp1videoparse.c: (gst_mp1videoparse_init),
+        (mp1videoparse_find_next_gop), (gst_mp1videoparse_time_code),
+        (gst_mp1videoparse_real_chain):
+          Fix MPEG video stream parsing. The original plugin had several
+          issues, including not timestamping streams where the source was
+          not timestamped (this happens with PTS values in mpeg system
+          streams, but MPEG video is also a valid stream on its own so
+          that needs timestamps too). We use the display time code for that
+          for now. Also, if one incoming buffer contains multiple valid
+          frames, we push them all on correctly now, including proper EOS
+          handling. Lastly, several potential segfaults were fixed, and we
+          properly sync on new sequence/gop headers to include them in next,
+          not previous frames (since they're header for the next frame, not
+          the previous). Also see #119206.
+        * gst/mpegaudioparse/gstmpegaudioparse.c: (gst_mp3parse_chain),
+        (bpf_from_header):
+          Move caps setting so we only do it after finding several valid
+          MPEG-1 fraes sequentially, not right after the first one (which
+          might be coincidental).
+        * gst/typefind/gsttypefindfunctions.c: (mpeg1_sys_type_find),
+        (mpeg_video_type_find), (mpeg_video_stream_type_find),
+        (plugin_init):
+          Add unsynced MPEG video stream typefinding, and change some
+          probability values so we detect streams rightly. The idea is as
+          follows: I can have an unsynced system stream which contains
+          video. In the current code, I would randomly get a type for either
+          system or video stream type found, because the probabilities are
+          being calculated rather randomly. I now use fixed values, so we
+          always prefer system stream if that was found (and that is how it
+          should be). If no system stream was found, we can still identity the stream as video-only.
+
 2004-01-23  Benjamin Otte  <in7y118@public.uni-hamburg.de>
 
         * gst/avi/gstavidemux.c: (gst_avi_demux_stream_avih),
gst-libs/gst/riff/riff-read.c

@@ -815,7 +815,7 @@ gst_riff_read_info (GstRiffRead *riff)
   /* let the world know about this wonderful thing */
   for (padlist = gst_element_get_pad_list (element);
        padlist != NULL; padlist = padlist->next) {
-    if (GST_PAD_IS_SRC (padlist->data)) {
+    if (GST_PAD_IS_SRC (padlist->data) && GST_PAD_IS_USABLE(padlist->data)) {
       gst_event_ref (event);
       gst_pad_push (GST_PAD (padlist->data), GST_DATA (event));
     }
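For readability, here is the loop from the hunk above assembled into one piece, using the GStreamer 0.8-era API the file already uses (gst_element_get_pad_list, GST_DATA). GST_PAD_IS_USABLE roughly means "linked and active", so the extra check keeps the tag event from being pushed to pads that cannot deliver it; the helper name is mine, not the plugin's.

#include <gst/gst.h>

/* The patched loop from gst_riff_read_info(): push a refcounted event to
 * every source pad of the element, but only to pads that are usable. */
static void
push_event_to_usable_src_pads (GstElement *element, GstEvent *event)
{
  const GList *padlist;

  for (padlist = gst_element_get_pad_list (element);
       padlist != NULL; padlist = padlist->next) {
    if (GST_PAD_IS_SRC (padlist->data) && GST_PAD_IS_USABLE (padlist->data)) {
      gst_event_ref (event);    /* gst_pad_push consumes one reference */
      gst_pad_push (GST_PAD (padlist->data), GST_DATA (event));
    }
  }
}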
gst/mpeg1videoparse/gstmp1videoparse.c

@@ -155,7 +155,7 @@ gst_mp1videoparse_init (Mp1VideoParse *mp1videoparse)
 
   mp1videoparse->partialbuf = NULL;
   mp1videoparse->need_resync = FALSE;
-  mp1videoparse->last_pts = 0;
+  mp1videoparse->last_pts = GST_CLOCK_TIME_NONE;
   mp1videoparse->picture_in_buffer = 0;
   mp1videoparse->width = mp1videoparse->height = -1;
   mp1videoparse->fps = mp1videoparse->asr = 0.;
@@ -259,8 +259,9 @@ mp1videoparse_find_next_gop (Mp1VideoParse *mp1videoparse, GstBuffer *buf)
       have_sync = TRUE;
     }
     else if (have_sync) {
-      if (byte == (SEQ_START_CODE & 0xff) || byte == (GOP_START_CODE & 0xff)) return offset-4;
-      else {
+      if (byte == (SEQ_START_CODE & 0xff) || byte == (GOP_START_CODE & 0xff)) {
+        return offset - 4;
+      } else {
         sync_zeros = 0;
         have_sync = FALSE;
       }
@@ -272,6 +273,19 @@ mp1videoparse_find_next_gop (Mp1VideoParse *mp1videoparse, GstBuffer *buf)
 
   return -1;
 }
+
+static guint64
+gst_mp1videoparse_time_code (guchar *gop,
+                             gfloat  fps)
+{
+  guint32 data = GUINT32_FROM_BE (* (guint32 *) gop);
+
+  return ((((data & 0xfc000000) >> 26) * 3600 * GST_SECOND) +   /* hours */
+          (((data & 0x03f00000) >> 20) * 60 * GST_SECOND) +     /* minutes */
+          (((data & 0x0007e000) >> 13) * GST_SECOND) +          /* seconds */
+          (((data & 0x00001f80) >> 7) * GST_SECOND / fps));     /* frames */
+}
+
 static void
 gst_mp1videoparse_flush (Mp1VideoParse *mp1videoparse)
 {
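A worked example of the masks in gst_mp1videoparse_time_code() above: the 32 bits read from the GOP header pack hours, minutes, seconds and a frame count, and the frame count is scaled by the stream frame rate. The sketch below is standalone rather than GStreamer code; it assumes GST_SECOND is 10^9 nanoseconds (which is what GStreamer defines it as) and uses double where the plugin uses gfloat.

#include <stdio.h>
#include <stdint.h>

#define SKETCH_GST_SECOND UINT64_C (1000000000)  /* GST_SECOND = 1e9 ns */

/* Same bit layout as gst_mp1videoparse_time_code() in the hunk above. */
static uint64_t
gop_time_code_to_ns (uint32_t data, double fps)
{
  return ((uint64_t) ((data & 0xfc000000) >> 26) * 3600 * SKETCH_GST_SECOND)  /* hours   */
       + ((uint64_t) ((data & 0x03f00000) >> 20) * 60 * SKETCH_GST_SECOND)    /* minutes */
       + ((uint64_t) ((data & 0x0007e000) >> 13) * SKETCH_GST_SECOND)         /* seconds */
       + (uint64_t) (((data & 0x00001f80) >> 7) * SKETCH_GST_SECOND / fps);   /* frames  */
}

int
main (void)
{
  /* time code 00:01:30 + frame 12: minutes = 1, seconds = 30, frames = 12 */
  uint32_t tc = (1u << 20) | (30u << 13) | (12u << 7);

  /* 60 s + 30 s + 12/25 s = 90.48 s -> prints 90480000000 at 25 fps */
  printf ("%llu\n", (unsigned long long) gop_time_code_to_ns (tc, 25.0));
  return 0;
}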
@@ -370,7 +384,7 @@ gst_mp1videoparse_real_chain (Mp1VideoParse *mp1videoparse, GstBuffer *buf, GstP
                                      mp1videoparse->partialbuf) ||
       mp1videoparse->need_resync) {
     sync_pos = mp1videoparse_find_next_gop(mp1videoparse, mp1videoparse->partialbuf);
-    if (sync_pos != -1) {
+    if (sync_pos >= 0) {
       mp1videoparse->need_resync = FALSE;
       GST_DEBUG ("mp1videoparse: found new gop at %d", sync_pos);
 
@@ -383,6 +397,14 @@ gst_mp1videoparse_real_chain (Mp1VideoParse *mp1videoparse, GstBuffer *buf, GstP
         size = GST_BUFFER_SIZE(mp1videoparse->partialbuf);
         offset = 0;
       }
+
+      head = GUINT32_FROM_BE(*((guint32 *)data));
+      /* re-call this function so that if we hadn't already, we can
+       * now read the sequence header and parse video properties,
+       * set caps, stream data, be happy, bla, bla, bla... */
+      if (!mp1videoparse_valid_sync (mp1videoparse, head,
+                                     mp1videoparse->partialbuf))
+        g_error ("Found sync but no valid sync point at pos 0x0");
     }
     else {
       GST_DEBUG ("mp1videoparse: could not sync");
@@ -392,7 +414,8 @@ gst_mp1videoparse_real_chain (Mp1VideoParse *mp1videoparse, GstBuffer *buf, GstP
     }
   }
 
-  if (mp1videoparse->picture_in_buffer == 1) {
+  if (mp1videoparse->picture_in_buffer == 1 &&
+      time_stamp != GST_CLOCK_TIME_NONE) {
     mp1videoparse->last_pts = time_stamp;
   }
 
@@ -403,7 +426,6 @@ gst_mp1videoparse_real_chain (Mp1VideoParse *mp1videoparse, GstBuffer *buf, GstP
 
   while (offset < size-1) {
     sync_byte = *(data + offset);
-    /*printf(" %d %02x\n", offset, sync_byte); */
     if (sync_byte == 0) {
       sync_state++;
     }
@@ -412,7 +434,9 @@ gst_mp1videoparse_real_chain (Mp1VideoParse *mp1videoparse, GstBuffer *buf, GstP
       if (data[offset+1] == (PICTURE_START_CODE & 0xff)) {
         mp1videoparse->picture_in_buffer++;
         if (mp1videoparse->picture_in_buffer == 1) {
-          mp1videoparse->last_pts = time_stamp;
+          if (time_stamp != GST_CLOCK_TIME_NONE) {
+            mp1videoparse->last_pts = time_stamp;
+          }
           sync_state = 0;
         }
         else if (mp1videoparse->picture_in_buffer == 2) {
@@ -424,6 +448,33 @@ gst_mp1videoparse_real_chain (Mp1VideoParse *mp1videoparse, GstBuffer *buf, GstP
           g_assert_not_reached();
         }
       }
+      /* A new sequence (or GOP) is a valid sync too. Note that the
+       * sequence header should be put in the next buffer, not here. */
+      else if (data[offset+1] == (SEQ_START_CODE & 0xFF) ||
+               data[offset+1] == (GOP_START_CODE & 0xFF)) {
+        if (mp1videoparse->picture_in_buffer == 0 &&
+            data[offset+1] == (GOP_START_CODE & 0xFF)) {
+          mp1videoparse->last_pts = gst_mp1videoparse_time_code (&data[2],
+                                                                 mp1videoparse->fps);
+        }
+        else if (mp1videoparse->picture_in_buffer == 1) {
+          have_sync = TRUE;
+          break;
+        } else {
+          g_assert (mp1videoparse->picture_in_buffer == 0);
+        }
+      }
+      /* end-of-sequence is a valid sync point and should be included
+       * in the current picture, not the next. */
+      else if (data[offset+1] == (SEQ_END_CODE & 0xFF)) {
+        if (mp1videoparse->picture_in_buffer == 1) {
+          offset += 4;
+          have_sync = TRUE;
+          break;
+        } else {
+          g_assert (mp1videoparse->picture_in_buffer == 0);
+        }
+      }
       else sync_state = 0;
     }
     /* something else... */
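The loop these hunks extend is a byte-wise MPEG start-code scanner: sync_state counts consecutive 0x00 bytes, and once a 0x01 follows two zeros, the next byte identifies the code (picture, sequence, GOP, sequence end) and decides whether the current picture is complete. Below is a minimal standalone sketch of that scanning pattern only, without the plugin's picture/GOP bookkeeping; it is an illustration, not the plugin code.

#include <stdio.h>
#include <stddef.h>

/* Return the offset of the first 00 00 01 <code> start code at or after
 * 'from', or -1 if none; same counting idea as sync_state/sync_zeros. */
static long
find_start_code (const unsigned char *data, size_t size, size_t from,
    unsigned char *code)
{
  int zeros = 0;
  size_t i;

  for (i = from; i < size; i++) {
    if (data[i] == 0x00) {
      zeros++;
    } else if (data[i] == 0x01 && zeros >= 2 && i + 1 < size) {
      *code = data[i + 1];   /* 0x00 picture, 0xb3 sequence, 0xb7 sequence end, 0xb8 GOP */
      return (long) (i - 2); /* offset of the two zero bytes of the prefix */
    } else {
      zeros = 0;
    }
  }
  return -1;
}

int
main (void)
{
  const unsigned char buf[] = { 0xff, 0x00, 0x00, 0x01, 0xb8, 0x12 };
  unsigned char code = 0;
  long pos = find_start_code (buf, sizeof (buf), 0, &code);

  printf ("start code 0x%02x at offset %ld\n", code, pos);  /* 0xb8 at offset 1 */
  return 0;
}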
@@ -434,12 +485,11 @@ gst_mp1videoparse_real_chain (Mp1VideoParse *mp1videoparse, GstBuffer *buf, GstP
 
   if (have_sync) {
     offset -= 2;
     GST_DEBUG ("mp1videoparse: synced at %ld code 0x000001%02x",offset,data[offset+3]);
-
-    outbuf = gst_buffer_create_sub(mp1videoparse->partialbuf, 0, offset+4);
+    outbuf = gst_buffer_create_sub(mp1videoparse->partialbuf, 0, offset);
     g_assert(outbuf != NULL);
     GST_BUFFER_TIMESTAMP(outbuf) = mp1videoparse->last_pts;
     GST_BUFFER_DURATION(outbuf) = GST_SECOND / mp1videoparse->fps;
-
+    mp1videoparse->last_pts += GST_BUFFER_DURATION (outbuf);
     if (mp1videoparse->in_flush) {
       /* FIXME, send a flush event here */
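The timestamp handling added in this hunk (and in the picture-start hunk further up) boils down to one rule: take the upstream timestamp when there is a valid one, otherwise keep a running last_pts, seeded from the GOP display time code, and advance it by one frame duration per pushed picture. A compact standalone sketch of that rule, with UINT64_MAX standing in for GST_CLOCK_TIME_NONE (an assumption made only for this sketch):

#include <stdio.h>
#include <stdint.h>

#define NS_PER_SEC   UINT64_C (1000000000)
#define NO_TIMESTAMP UINT64_MAX   /* stands in for GST_CLOCK_TIME_NONE */

/* Prefer a valid upstream timestamp (e.g. a PTS from a system stream);
 * otherwise interpolate from the previous one by one frame duration. */
static uint64_t
next_pts (uint64_t last_pts, uint64_t upstream_ts, double fps)
{
  if (upstream_ts != NO_TIMESTAMP)
    return upstream_ts;
  return last_pts + (uint64_t) (NS_PER_SEC / fps);
}

int
main (void)
{
  uint64_t pts = 0;

  pts = next_pts (pts, NO_TIMESTAMP, 25.0);      /* no upstream PTS: 0 + 40 ms */
  printf ("%llu\n", (unsigned long long) pts);   /* prints 40000000 */
  return 0;
}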
@@ -456,12 +506,17 @@ gst_mp1videoparse_real_chain (Mp1VideoParse *mp1videoparse, GstBuffer *buf, GstP
     }
     mp1videoparse->picture_in_buffer = 0;
 
-    temp = gst_buffer_create_sub(mp1videoparse->partialbuf, offset, size-offset);
+    if (size > offset)
+      temp = gst_buffer_create_sub(mp1videoparse->partialbuf, offset, size-offset);
+    else
+      temp = NULL;
     gst_buffer_unref(mp1videoparse->partialbuf);
     mp1videoparse->partialbuf = temp;
   }
   else {
-    mp1videoparse->last_pts = time_stamp;
+    if (time_stamp != GST_CLOCK_TIME_NONE) {
+      mp1videoparse->last_pts = time_stamp;
+    }
   }
 }
 
gst/mpegaudioparse/gstmpegaudioparse.c

@@ -402,6 +402,26 @@ gst_mp3parse_chain (GstPad *pad, GstData *_data)
         GST_DEBUG ("mp3parse: partial buffer needed %ld < %d ",(size-offset), bpf);
         break;
       } else {
+        guint bitrate, layer, rate, channels;
+
+        if (!mp3_type_frame_length_from_header (header, &layer,
+                                                &channels,
+                                                &bitrate, &rate)) {
+          g_error("Header failed internal error");
+        }
+        if (channels != mp3parse->channels ||
+            rate != mp3parse->rate ||
+            layer != mp3parse->layer ||
+            bitrate != mp3parse->bit_rate) {
+          GstCaps *caps = mp3_caps_create (layer, channels, bitrate, rate);
+
+          gst_pad_set_explicit_caps(mp3parse->srcpad, caps);
+
+          mp3parse->channels = channels;
+          mp3parse->layer = layer;
+          mp3parse->rate = rate;
+          mp3parse->bit_rate = bitrate;
+        }
 
         outbuf = gst_buffer_create_sub(mp3parse->partialbuf,offset,bpf);
 
@@ -460,20 +480,6 @@ bpf_from_header (GstMPEGAudioParse *parse, unsigned long header)
     return 0;
   }
 
-  if (channels != parse->channels ||
-      rate != parse->rate ||
-      layer != parse->layer ||
-      bitrate != parse->bit_rate) {
-    GstCaps *caps = mp3_caps_create (layer, channels, bitrate, rate);
-
-    gst_pad_set_explicit_caps(parse->srcpad, caps);
-
-    parse->channels = channels;
-    parse->layer = layer;
-    parse->rate = rate;
-    parse->bit_rate = bitrate;
-  }
-
   return length;
 }
 