/* GStreamer
 * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

/**
 * SECTION:element-videorate
 *
 * This element takes an incoming stream of timestamped video frames.
 * It will produce a perfect stream that matches the source pad's framerate.
 *
 * The correction is performed by dropping and duplicating frames, no fancy
 * algorithm is used to interpolate frames (yet).
 *
 * By default the element will simply negotiate the same framerate on its
 * source and sink pad.
 *
 * This operation is useful to link to elements that require a perfect stream.
 * Typical examples are formats that do not store timestamps for video frames,
 * but only store a framerate, like Ogg and AVI.
 *
 * A conversion to a specific framerate can be forced by using filtered caps on
 * the source pad.
 *
 * The properties #GstVideoRate:in, #GstVideoRate:out, #GstVideoRate:duplicate
 * and #GstVideoRate:drop can be read to obtain information about the number
 * of input frames, output frames, duplicated frames (i.e. the number of times
 * an input frame was duplicated, beside being used normally) and dropped
 * frames (i.e. the number of unused input frames).
 *
 * An input stream that needs no adjustments will thus never have dropped or
 * duplicated frames.
 *
 * When the #GstVideoRate:silent property is set to FALSE, a GObject property
 * notification will be emitted whenever one of the #GstVideoRate:duplicate or
 * #GstVideoRate:drop values changes.
 * This can potentially cause performance degradation.
 * Note that property notification will happen from the streaming thread, so
 * applications should be prepared for this.
 *
 * <refsect2>
 * <title>Example pipelines</title>
 * |[
 * gst-launch -v filesrc location=videotestsrc.ogg ! oggdemux ! theoradec ! videorate ! video/x-raw,framerate=15/1 ! xvimagesink
 * ]| Decode an Ogg/Theora file and adjust the framerate to 15 fps before playing.
 * To create the test Ogg/Theora file refer to the documentation of theoraenc.
 * |[
 * gst-launch -v v4l2src ! videorate ! video/x-raw,framerate=25/2 ! theoraenc ! oggmux ! filesink location=recording.ogg
 * ]| Capture video from a V4L device, and adjust the stream to 12.5 fps before
 * encoding to Ogg/Theora.
 * </refsect2>
 *
 * Last reviewed on 2006-09-02 (0.10.11)
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "gstvideorate.h"

GST_DEBUG_CATEGORY_STATIC (video_rate_debug);
#define GST_CAT_DEFAULT video_rate_debug

/* GstVideoRate signals and args */
enum
{
  /* FILL ME */
  LAST_SIGNAL
};

#define DEFAULT_SILENT TRUE
#define DEFAULT_NEW_PREF 1.0
#define DEFAULT_SKIP_TO_FIRST FALSE
#define DEFAULT_DROP_ONLY FALSE
#define DEFAULT_AVERAGE_PERIOD 0

enum
{
  ARG_0,
  ARG_IN,
  ARG_OUT,
  ARG_DUP,
  ARG_DROP,
  ARG_SILENT,
  ARG_NEW_PREF,
  ARG_SKIP_TO_FIRST,
  ARG_DROP_ONLY,
  ARG_AVERAGE_PERIOD
      /* FILL ME */
};

static GstStaticPadTemplate gst_video_rate_src_template =
GST_STATIC_PAD_TEMPLATE ("src",
    GST_PAD_SRC,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS ("video/x-raw;" "image/jpeg;" "image/png")
    );

static GstStaticPadTemplate gst_video_rate_sink_template =
GST_STATIC_PAD_TEMPLATE ("sink",
    GST_PAD_SINK,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS ("video/x-raw;" "image/jpeg;" "image/png")
    );

static void gst_video_rate_swap_prev (GstVideoRate * videorate,
    GstBuffer * buffer, gint64 time);
static gboolean gst_video_rate_event (GstPad * pad, GstEvent * event);
static gboolean gst_video_rate_query (GstPad * pad, GstQuery * query);
static GstFlowReturn gst_video_rate_chain (GstPad * pad, GstBuffer * buffer);

static void gst_video_rate_set_property (GObject * object,
    guint prop_id, const GValue * value, GParamSpec * pspec);
static void gst_video_rate_get_property (GObject * object,
    guint prop_id, GValue * value, GParamSpec * pspec);

static GstStateChangeReturn gst_video_rate_change_state (GstElement * element,
    GstStateChange transition);

/*static guint gst_video_rate_signals[LAST_SIGNAL] = { 0 }; */

static GParamSpec *pspec_drop = NULL;
static GParamSpec *pspec_duplicate = NULL;

#define gst_video_rate_parent_class parent_class
G_DEFINE_TYPE (GstVideoRate, gst_video_rate, GST_TYPE_ELEMENT);

static void
gst_video_rate_class_init (GstVideoRateClass * klass)
{
  GObjectClass *object_class = G_OBJECT_CLASS (klass);
  GstElementClass *element_class = GST_ELEMENT_CLASS (klass);

  parent_class = g_type_class_peek_parent (klass);

  object_class->set_property = gst_video_rate_set_property;
  object_class->get_property = gst_video_rate_get_property;

  g_object_class_install_property (object_class, ARG_IN,
      g_param_spec_uint64 ("in", "In",
          "Number of input frames", 0, G_MAXUINT64, 0,
          G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (object_class, ARG_OUT,
      g_param_spec_uint64 ("out", "Out", "Number of output frames", 0,
          G_MAXUINT64, 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
  pspec_duplicate = g_param_spec_uint64 ("duplicate", "Duplicate",
      "Number of duplicated frames", 0, G_MAXUINT64, 0,
      G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);
  g_object_class_install_property (object_class, ARG_DUP, pspec_duplicate);
  pspec_drop = g_param_spec_uint64 ("drop", "Drop", "Number of dropped frames",
      0, G_MAXUINT64, 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);
  g_object_class_install_property (object_class, ARG_DROP, pspec_drop);
  g_object_class_install_property (object_class, ARG_SILENT,
      g_param_spec_boolean ("silent", "silent",
          "Don't emit notify for dropped and duplicated frames", DEFAULT_SILENT,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (object_class, ARG_NEW_PREF,
      g_param_spec_double ("new-pref", "New Pref",
          "Value indicating how much to prefer new frames (unused)", 0.0, 1.0,
          DEFAULT_NEW_PREF, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstVideoRate:skip-to-first:
   *
   * Don't produce buffers before the first one we receive.
   *
   * Since: 0.10.25
   */
  g_object_class_install_property (object_class, ARG_SKIP_TO_FIRST,
      g_param_spec_boolean ("skip-to-first", "Skip to first buffer",
          "Don't produce buffers before the first one we receive",
          DEFAULT_SKIP_TO_FIRST, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  gst_element_class_set_details_simple (element_class,
      "Video rate adjuster", "Filter/Effect/Video",
      "Drops/duplicates/adjusts timestamps on video frames to make a perfect stream",
      "Wim Taymans <wim@fluendo.com>");

  gst_element_class_add_pad_template (element_class,
      gst_static_pad_template_get (&gst_video_rate_sink_template));
  gst_element_class_add_pad_template (element_class,
      gst_static_pad_template_get (&gst_video_rate_src_template));

  /**
   * GstVideoRate:drop-only:
   *
   * Only drop frames, no duplicates are produced.
   *
   * Since: 0.10.34
   */
  g_object_class_install_property (object_class, ARG_DROP_ONLY,
      g_param_spec_boolean ("drop-only", "Only Drop",
          "Only drop frames, no duplicates are produced",
          DEFAULT_DROP_ONLY, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstVideoRate:average-period:
   *
   * Arrange for maximum framerate by dropping frames beyond a certain framerate,
   * where the framerate is calculated using a moving average over the
   * configured period.
   *
   * Since: 0.10.34
   */
  g_object_class_install_property (object_class, ARG_AVERAGE_PERIOD,
      g_param_spec_uint64 ("average-period", "Period over which to average",
          "Period over which to average the framerate (in ns) (0 = disabled)",
          0, G_MAXINT64, DEFAULT_AVERAGE_PERIOD,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  element_class->change_state = GST_DEBUG_FUNCPTR (gst_video_rate_change_state);
}

/* return the caps that can be used on out_pad given in_caps on in_pad */
static gboolean
gst_video_rate_transformcaps (GstPad * in_pad, GstCaps * in_caps,
    GstPad * out_pad, GstCaps ** out_caps, GstCaps * filter)
{
  GstCaps *intersect, *in_templ;
  gint i;
  GSList *extra_structures = NULL;
  GSList *iter;

  in_templ = gst_pad_get_pad_template_caps (in_pad);
  intersect =
      gst_caps_intersect_full (in_caps, in_templ, GST_CAPS_INTERSECT_FIRST);
  gst_caps_unref (in_templ);

  /* all possible framerates are allowed */
  for (i = 0; i < gst_caps_get_size (intersect); i++) {
    GstStructure *structure;

    structure = gst_caps_get_structure (intersect, i);

    if (gst_structure_has_field (structure, "framerate")) {
      GstStructure *copy_structure;

      copy_structure = gst_structure_copy (structure);
      gst_structure_set (copy_structure,
          "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL);
      extra_structures = g_slist_append (extra_structures, copy_structure);
    }
  }

  /* append the extra structures */
  for (iter = extra_structures; iter != NULL; iter = g_slist_next (iter)) {
    gst_caps_append_structure (intersect, (GstStructure *) iter->data);
  }
  g_slist_free (extra_structures);

  if (filter) {
    GstCaps *tmp;

    tmp = gst_caps_intersect_full (filter, intersect, GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (intersect);
    intersect = tmp;
  }

  *out_caps = intersect;

  return TRUE;
}
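
/* getcaps for both pads: take whatever the peer of the other pad can handle,
 * relax the framerate field to the full range, prefer the downstream caps
 * and fall back to the pad template when there is no peer */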
static GstCaps *
gst_video_rate_getcaps (GstPad * pad, GstCaps * filter)
{
  GstVideoRate *videorate;
  GstPad *otherpad;
  GstCaps *caps;

  videorate = GST_VIDEO_RATE (GST_PAD_PARENT (pad));

  otherpad = (pad == videorate->srcpad) ? videorate->sinkpad :
      videorate->srcpad;

  /* we can do what the peer can */
  caps = gst_pad_peer_get_caps (otherpad, filter);
  if (caps) {
    GstCaps *transform, *intersect;

    gst_video_rate_transformcaps (otherpad, caps, pad, &transform, filter);

    /* Now prefer the downstream caps if possible */
    intersect =
        gst_caps_intersect_full (caps, transform, GST_CAPS_INTERSECT_FIRST);
    if (!gst_caps_is_empty (intersect)) {
      gst_caps_append (intersect, transform);
      gst_caps_unref (caps);
      caps = intersect;
    } else {
      gst_caps_unref (intersect);
      caps = transform;
    }
  } else {
    /* no peer, our padtemplate is enough then */
    caps = gst_pad_get_pad_template_caps (pad);
    if (filter) {
      GstCaps *intersection;
      intersection =
          gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
      gst_caps_unref (caps);
      caps = intersection;
    }
  }

  return caps;
}
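
/* set the output framerate from @caps: fold the frames already produced at
 * the old rate into base_ts so next_ts stays continuous, reset
 * out_frame_count and send the new CAPS event downstream */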
static gboolean
gst_video_rate_set_src_caps (GstVideoRate * videorate, GstCaps * caps)
{
  GstStructure *structure;
  gint rate_numerator, rate_denominator;

  GST_DEBUG_OBJECT (videorate, "src caps %" GST_PTR_FORMAT, caps);

  structure = gst_caps_get_structure (caps, 0);
  if (!gst_structure_get_fraction (structure, "framerate",
          &rate_numerator, &rate_denominator))
    goto no_framerate;

  /* out_frame_count is scaled by the frame rate caps when calculating next_ts.
   * when the frame rate caps change, we must update base_ts and reset
   * out_frame_count */
  if (videorate->to_rate_numerator) {
    videorate->base_ts +=
        gst_util_uint64_scale (videorate->out_frame_count,
        videorate->to_rate_denominator * GST_SECOND,
        videorate->to_rate_numerator);
  }
  videorate->out_frame_count = 0;
  videorate->to_rate_numerator = rate_numerator;
  videorate->to_rate_denominator = rate_denominator;
  videorate->wanted_diff = gst_util_uint64_scale_int (GST_SECOND,
      rate_denominator, rate_numerator);

  gst_pad_push_event (videorate->srcpad, gst_event_new_caps (caps));

  return TRUE;

  /* ERRORS */
no_framerate:
  {
    GST_DEBUG_OBJECT (videorate, "no framerate specified");
    return FALSE;
  }
}
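
/* negotiate new sink @caps with the peer of the source pad: keep the
 * framerate if the peer accepts the caps as they are, otherwise fixate the
 * nearest framerate (and related fields) the peer can handle */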
static gboolean
gst_video_rate_set_sink_caps (GstVideoRate * videorate, GstCaps * caps)
{
  GstStructure *structure;
  gboolean ret = TRUE;
  gint rate_numerator, rate_denominator;

  GST_DEBUG_OBJECT (videorate, "sink caps %" GST_PTR_FORMAT, caps);

  structure = gst_caps_get_structure (caps, 0);
  if (!gst_structure_get_fraction (structure, "framerate",
          &rate_numerator, &rate_denominator))
    goto no_framerate;

  videorate->from_rate_numerator = rate_numerator;
  videorate->from_rate_denominator = rate_denominator;

  /* now try to find something for the peer */
  if (gst_pad_peer_accept_caps (videorate->srcpad, caps)) {
    /* the peer accepts the caps as they are */
    ret = gst_video_rate_set_src_caps (videorate, caps);
  } else {
    GstCaps *transform = NULL;

    ret = FALSE;

    /* see how we can transform the input caps */
    if (!gst_video_rate_transformcaps (videorate->sinkpad, caps,
            videorate->srcpad, &transform, NULL))
      goto no_transform;

    GST_DEBUG_OBJECT (videorate, "transform %" GST_PTR_FORMAT, transform);

    /* see what the peer can do */
    caps = gst_pad_peer_get_caps (videorate->srcpad, transform);

    GST_DEBUG_OBJECT (videorate, "icaps %" GST_PTR_FORMAT, caps);

    /* could turn up empty, due to e.g. colorspace etc */
    if (gst_caps_get_size (caps) == 0) {
      gst_caps_unref (caps);
      goto no_transform;
    }

    /* take first possibility */
    caps = gst_caps_make_writable (caps);
    gst_caps_truncate (caps);
    structure = gst_caps_get_structure (caps, 0);

    /* and fixate */
    gst_structure_fixate_field_nearest_fraction (structure, "framerate",
        rate_numerator, rate_denominator);
    gst_structure_get_fraction (structure, "framerate",
        &rate_numerator, &rate_denominator);

    videorate->to_rate_numerator = rate_numerator;
    videorate->to_rate_denominator = rate_denominator;

    if (gst_structure_has_field (structure, "interlaced"))
      gst_structure_fixate_field_boolean (structure, "interlaced", FALSE);
    if (gst_structure_has_field (structure, "color-matrix"))
      gst_structure_fixate_field_string (structure, "color-matrix", "sdtv");
    if (gst_structure_has_field (structure, "chroma-site"))
      gst_structure_fixate_field_string (structure, "chroma-site", "mpeg2");
    if (gst_structure_has_field (structure, "pixel-aspect-ratio"))
      gst_structure_fixate_field_nearest_fraction (structure,
          "pixel-aspect-ratio", 1, 1);

    ret = gst_video_rate_set_src_caps (videorate, caps);
    gst_caps_unref (caps);
  }
done:
  /* After a setcaps, our caps may have changed. In that case, we can't use
   * the old buffer, if there was one (it might have different dimensions) */
  GST_DEBUG_OBJECT (videorate, "swapping old buffers");
  gst_video_rate_swap_prev (videorate, NULL, GST_CLOCK_TIME_NONE);

  return ret;

no_framerate:
  {
    GST_DEBUG_OBJECT (videorate, "no framerate specified");
    goto done;
  }
no_transform:
  {
    GST_DEBUG_OBJECT (videorate, "no framerate transform possible");
    ret = FALSE;
    goto done;
  }
}
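
/* reset counters, timestamps and the stored buffer to their initial state;
 * called on FLUSH_STOP and when going back to READY */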
static void
gst_video_rate_reset (GstVideoRate * videorate)
{
  GST_DEBUG_OBJECT (videorate, "resetting internal variables");

  videorate->in = 0;
  videorate->out = 0;
  videorate->base_ts = 0;
  videorate->out_frame_count = 0;
  videorate->drop = 0;
  videorate->dup = 0;
  videorate->next_ts = GST_CLOCK_TIME_NONE;
  videorate->last_ts = GST_CLOCK_TIME_NONE;
  videorate->discont = TRUE;
  videorate->average = 0;
  gst_video_rate_swap_prev (videorate, NULL, 0);

  gst_segment_init (&videorate->segment, GST_FORMAT_TIME);
}
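
/* set up the sink and source pads and initialise all properties to their
 * default values */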
static void
gst_video_rate_init (GstVideoRate * videorate)
{
  videorate->sinkpad =
      gst_pad_new_from_static_template (&gst_video_rate_sink_template, "sink");
  gst_pad_set_event_function (videorate->sinkpad,
      GST_DEBUG_FUNCPTR (gst_video_rate_event));
  gst_pad_set_chain_function (videorate->sinkpad,
      GST_DEBUG_FUNCPTR (gst_video_rate_chain));
  gst_pad_set_getcaps_function (videorate->sinkpad,
      GST_DEBUG_FUNCPTR (gst_video_rate_getcaps));
  gst_element_add_pad (GST_ELEMENT (videorate), videorate->sinkpad);

  videorate->srcpad =
      gst_pad_new_from_static_template (&gst_video_rate_src_template, "src");
  gst_pad_set_query_function (videorate->srcpad,
      GST_DEBUG_FUNCPTR (gst_video_rate_query));
  gst_pad_set_getcaps_function (videorate->srcpad,
      GST_DEBUG_FUNCPTR (gst_video_rate_getcaps));
  gst_element_add_pad (GST_ELEMENT (videorate), videorate->srcpad);

  gst_video_rate_reset (videorate);
  videorate->silent = DEFAULT_SILENT;
  videorate->new_pref = DEFAULT_NEW_PREF;
  videorate->drop_only = DEFAULT_DROP_ONLY;
  videorate->average_period = DEFAULT_AVERAGE_PERIOD;

  videorate->from_rate_numerator = 0;
  videorate->from_rate_denominator = 0;
  videorate->to_rate_numerator = 0;
  videorate->to_rate_denominator = 0;
}

/* flush the oldest buffer */
static GstFlowReturn
gst_video_rate_flush_prev (GstVideoRate * videorate, gboolean duplicate)
{
  GstFlowReturn res;
  GstBuffer *outbuf;
  GstClockTime push_ts;

  if (!videorate->prevbuf)
    goto eos_before_buffers;

  /* make sure we can write to the metadata */
  outbuf = gst_buffer_make_writable (gst_buffer_ref (videorate->prevbuf));

  GST_BUFFER_OFFSET (outbuf) = videorate->out;
  GST_BUFFER_OFFSET_END (outbuf) = videorate->out + 1;

  if (videorate->discont) {
    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
    videorate->discont = FALSE;
  } else
    GST_BUFFER_FLAG_UNSET (outbuf, GST_BUFFER_FLAG_DISCONT);

  if (duplicate)
    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_GAP);
  else
    GST_BUFFER_FLAG_UNSET (outbuf, GST_BUFFER_FLAG_GAP);

  /* this is the timestamp we put on the buffer */
  push_ts = videorate->next_ts;

  videorate->out++;
  videorate->out_frame_count++;
  if (videorate->to_rate_numerator) {
    /* interpolate next expected timestamp in the segment */
    videorate->next_ts =
        videorate->segment.base + videorate->segment.start +
        videorate->base_ts + gst_util_uint64_scale (videorate->out_frame_count,
        videorate->to_rate_denominator * GST_SECOND,
        videorate->to_rate_numerator);
    GST_BUFFER_DURATION (outbuf) = videorate->next_ts - push_ts;
  }

  /* We do not need to update time in VFR (variable frame rate) mode */
  if (!videorate->drop_only) {
    /* adapt for looping, bring back to time in current segment. */
    GST_BUFFER_TIMESTAMP (outbuf) = push_ts - videorate->segment.base;
  }

  GST_LOG_OBJECT (videorate,
      "old is best, dup, pushing buffer outgoing ts %" GST_TIME_FORMAT,
      GST_TIME_ARGS (push_ts));

  res = gst_pad_push (videorate->srcpad, outbuf);

  return res;

  /* WARNINGS */
eos_before_buffers:
  {
    GST_INFO_OBJECT (videorate, "got EOS before any buffer was received");
    return GST_FLOW_OK;
  }
}
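
/* take ownership of @buffer (may be NULL) and store it, together with its
 * time, as the new previous buffer, dropping the reference to the old one */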
static void
gst_video_rate_swap_prev (GstVideoRate * videorate, GstBuffer * buffer,
    gint64 time)
{
  GST_LOG_OBJECT (videorate, "swap_prev: storing buffer %p in prev", buffer);
  if (videorate->prevbuf)
    gst_buffer_unref (videorate->prevbuf);
  videorate->prevbuf = buffer;
  videorate->prev_ts = time;
}
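
/* helpers to emit notify::drop and notify::duplicate, using the cached
 * GParamSpecs where g_object_notify_by_pspec() is available */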
static void
gst_video_rate_notify_drop (GstVideoRate * videorate)
{
#if !GLIB_CHECK_VERSION(2,26,0)
  g_object_notify ((GObject *) videorate, "drop");
#else
  g_object_notify_by_pspec ((GObject *) videorate, pspec_drop);
#endif
}

static void
gst_video_rate_notify_duplicate (GstVideoRate * videorate)
{
#if !GLIB_CHECK_VERSION(2,26,0)
  g_object_notify ((GObject *) videorate, "duplicate");
#else
  g_object_notify_by_pspec ((GObject *) videorate, pspec_duplicate);
#endif
}
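
/* sink pad event handler: handles CAPS, SEGMENT, EOS and FLUSH_STOP.
 * MAGIC_LIMIT bounds how many duplicates we push to close out a segment or
 * to fill up to EOS */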
#define MAGIC_LIMIT 25
static gboolean
gst_video_rate_event (GstPad * pad, GstEvent * event)
{
  GstVideoRate *videorate;
  gboolean ret;

  videorate = GST_VIDEO_RATE (gst_pad_get_parent (pad));

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_CAPS:
    {
      GstCaps *caps;

      gst_event_parse_caps (event, &caps);
      ret = gst_video_rate_set_sink_caps (videorate, caps);
      gst_event_unref (event);

      /* don't forward */
      goto done;
    }
    case GST_EVENT_SEGMENT:
    {
      const GstSegment *segment;

      gst_event_parse_segment (event, &segment);

      if (segment->format != GST_FORMAT_TIME)
        goto format_error;

      GST_DEBUG_OBJECT (videorate, "handle NEWSEGMENT");

      /* close up the previous segment, if appropriate */
      if (videorate->prevbuf) {
        gint count = 0;
        GstFlowReturn res;

        res = GST_FLOW_OK;
        /* fill up to the end of current segment,
         * or only send out the stored buffer if there is no specific stop.
         * regardless, prevent going loopy in strange cases */
        while (res == GST_FLOW_OK && count <= MAGIC_LIMIT &&
            ((GST_CLOCK_TIME_IS_VALID (videorate->segment.stop) &&
                    videorate->next_ts - videorate->segment.base
                    < videorate->segment.stop)
                || count < 1)) {
          res = gst_video_rate_flush_prev (videorate, count > 0);
          count++;
        }
        if (count > 1) {
          videorate->dup += count - 1;
          if (!videorate->silent)
            gst_video_rate_notify_duplicate (videorate);
        } else if (count == 0) {
          videorate->drop++;
          if (!videorate->silent)
            gst_video_rate_notify_drop (videorate);
        }
        /* clean up for the new one; _chain will resume from the new start */
        videorate->base_ts = 0;
        videorate->out_frame_count = 0;
        gst_video_rate_swap_prev (videorate, NULL, 0);
        videorate->next_ts = GST_CLOCK_TIME_NONE;
      }

      /* We just want to update the accumulated stream_time */
      gst_segment_copy_into (segment, &videorate->segment);

      GST_DEBUG_OBJECT (videorate, "updated segment: %" GST_SEGMENT_FORMAT,
          &videorate->segment);
      break;
    }
    case GST_EVENT_EOS:{
      gint count = 0;
      GstFlowReturn res = GST_FLOW_OK;

      GST_DEBUG_OBJECT (videorate, "Got EOS");

      /* If the segment has a stop position, fill the segment */
      if (GST_CLOCK_TIME_IS_VALID (videorate->segment.stop)) {
        /* fill up to the end of current segment,
         * or only send out the stored buffer if there is no specific stop.
         * regardless, prevent going loopy in strange cases */
        while (res == GST_FLOW_OK && count <= MAGIC_LIMIT &&
            ((videorate->next_ts - videorate->segment.base <
                    videorate->segment.stop)
                || count < 1)) {
          res = gst_video_rate_flush_prev (videorate, count > 0);
          count++;
        }
      } else if (videorate->prevbuf) {
        /* Output at least one frame but if the buffer duration is valid, output
         * enough frames to use the complete buffer duration */
        if (GST_BUFFER_DURATION_IS_VALID (videorate->prevbuf)) {
          GstClockTime end_ts =
              videorate->next_ts + GST_BUFFER_DURATION (videorate->prevbuf);

          while (res == GST_FLOW_OK && count <= MAGIC_LIMIT &&
              ((videorate->next_ts - videorate->segment.base < end_ts)
                  || count < 1)) {
            res = gst_video_rate_flush_prev (videorate, count > 0);
            count++;
          }
        } else {
          res = gst_video_rate_flush_prev (videorate, FALSE);
          count = 1;
        }
      }

      if (count > 1) {
        videorate->dup += count - 1;
        if (!videorate->silent)
          gst_video_rate_notify_duplicate (videorate);
      } else if (count == 0) {
        videorate->drop++;
        if (!videorate->silent)
          gst_video_rate_notify_drop (videorate);
      }

      break;
    }
    case GST_EVENT_FLUSH_STOP:
      /* also resets the segment */
      GST_DEBUG_OBJECT (videorate, "Got FLUSH_STOP");
      gst_video_rate_reset (videorate);
      break;
    default:
      break;
  }

  ret = gst_pad_push_event (videorate->srcpad, event);

done:
  gst_object_unref (videorate);

  return ret;

  /* ERRORS */
format_error:
  {
    GST_WARNING_OBJECT (videorate,
        "Got segment but doesn't have GST_FORMAT_TIME value");
    gst_event_unref (event);
    ret = FALSE;
    goto done;
  }
}
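
/* src pad query handler: for LATENCY queries, add one input frame period on
 * top of the peer's values, since we may hold a frame until the next one
 * arrives */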
static gboolean
gst_video_rate_query (GstPad * pad, GstQuery * query)
{
  GstVideoRate *videorate;
  gboolean res = FALSE;

  videorate = GST_VIDEO_RATE (gst_pad_get_parent (pad));

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_LATENCY:
    {
      GstClockTime min, max;
      gboolean live;
      guint64 latency;
      GstPad *peer;

      if ((peer = gst_pad_get_peer (videorate->sinkpad))) {
        if ((res = gst_pad_query (peer, query))) {
          gst_query_parse_latency (query, &live, &min, &max);

          GST_DEBUG_OBJECT (videorate, "Peer latency: min %"
              GST_TIME_FORMAT " max %" GST_TIME_FORMAT,
              GST_TIME_ARGS (min), GST_TIME_ARGS (max));

          if (videorate->from_rate_numerator != 0) {
            /* add latency. We don't really know since we hold on to the frames
             * until we get a next frame, which can be anything. We assume
             * however that this will take from_rate time. */
            latency = gst_util_uint64_scale (GST_SECOND,
                videorate->from_rate_denominator,
                videorate->from_rate_numerator);
          } else {
            /* no input framerate, we don't know */
            latency = 0;
          }

          GST_DEBUG_OBJECT (videorate, "Our latency: %"
              GST_TIME_FORMAT, GST_TIME_ARGS (latency));

          min += latency;
          if (max != -1)
            max += latency;

          GST_DEBUG_OBJECT (videorate, "Calculated total latency : min %"
              GST_TIME_FORMAT " max %" GST_TIME_FORMAT,
              GST_TIME_ARGS (min), GST_TIME_ARGS (max));

          gst_query_set_latency (query, live, min, max);
        }
        gst_object_unref (peer);
      }
      break;
    }
    default:
      res = gst_pad_query_default (pad, query);
      break;
  }
  gst_object_unref (videorate);

  return res;
}
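
/* chain function used when average-period is set: cap the output framerate by
 * dropping buffers that arrive faster than wanted, based on a moving average
 * of the inter-frame gap */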
static GstFlowReturn
gst_video_rate_chain_max_avg (GstVideoRate * videorate, GstBuffer * buf)
{
  GstClockTime ts = GST_BUFFER_TIMESTAMP (buf);

  videorate->in++;

  if (!GST_CLOCK_TIME_IS_VALID (ts) || videorate->wanted_diff == 0)
    goto push;

  /* drop frames if they exceed our output rate */
  if (GST_CLOCK_TIME_IS_VALID (videorate->last_ts)) {
    GstClockTimeDiff diff = ts - videorate->last_ts;

    /* Drop buffer if it's early compared to the desired frame rate and
     * the current average is higher than the desired average
     */
    if (diff < videorate->wanted_diff &&
        videorate->average < videorate->wanted_diff)
      goto drop;

    /* Update average */
    if (videorate->average) {
      GstClockTimeDiff wanted_diff;

      if (G_LIKELY (videorate->average_period > videorate->wanted_diff))
        wanted_diff = videorate->wanted_diff;
      else
        wanted_diff = videorate->average_period * 10;

      videorate->average =
          gst_util_uint64_scale_round (videorate->average,
          videorate->average_period - wanted_diff,
          videorate->average_period) +
          gst_util_uint64_scale_round (diff, wanted_diff,
          videorate->average_period);
    } else {
      videorate->average = diff;
    }
  }

  videorate->last_ts = ts;

push:
  videorate->out++;

  return gst_pad_push (videorate->srcpad, buf);

drop:
  gst_buffer_unref (buf);
  if (!videorate->silent)
    gst_video_rate_notify_drop (videorate);
  return GST_FLOW_OK;
}
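
/* main chain function: keep the previous buffer around and, for every new
 * buffer, compare both against the next expected output timestamp, pushing
 * (and possibly duplicating) the closest one and dropping the other */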
static GstFlowReturn
gst_video_rate_chain (GstPad * pad, GstBuffer * buffer)
{
  GstVideoRate *videorate;
  GstFlowReturn res = GST_FLOW_OK;
  GstClockTime intime, in_ts, in_dur;
  GstClockTime avg_period;
  gboolean skip = FALSE;

  videorate = GST_VIDEO_RATE (GST_PAD_PARENT (pad));

  /* make sure the denominators are not 0 */
  if (videorate->from_rate_denominator == 0 ||
      videorate->to_rate_denominator == 0)
    goto not_negotiated;

  GST_OBJECT_LOCK (videorate);
  avg_period = videorate->average_period_set;
  GST_OBJECT_UNLOCK (videorate);

  /* MT-safe switching between modes */
  if (G_UNLIKELY (avg_period != videorate->average_period)) {
    videorate->average_period = avg_period;
    videorate->last_ts = GST_CLOCK_TIME_NONE;
    if (avg_period && !videorate->average) {
      /* enabling average mode */
      videorate->average = 0;
    } else {
      /* enable regular mode */
      gst_video_rate_swap_prev (videorate, NULL, 0);
      /* arrange for skip-to-first behaviour */
      videorate->next_ts = GST_CLOCK_TIME_NONE;
      skip = TRUE;
    }
  }

  if (videorate->average_period > 0)
    return gst_video_rate_chain_max_avg (videorate, buffer);

  in_ts = GST_BUFFER_TIMESTAMP (buffer);
  in_dur = GST_BUFFER_DURATION (buffer);

  if (G_UNLIKELY (in_ts == GST_CLOCK_TIME_NONE)) {
    in_ts = videorate->last_ts;
    if (G_UNLIKELY (in_ts == GST_CLOCK_TIME_NONE))
      goto invalid_buffer;
  }

  /* get the time of the next expected buffer timestamp, we use this when the
   * next buffer has -1 as a timestamp */
  videorate->last_ts = in_ts;
  if (in_dur != GST_CLOCK_TIME_NONE)
    videorate->last_ts += in_dur;

  GST_DEBUG_OBJECT (videorate, "got buffer with timestamp %" GST_TIME_FORMAT,
      GST_TIME_ARGS (in_ts));

  /* the input time is the time in the segment + all previously accumulated
   * segments */
  intime = in_ts + videorate->segment.base;

  /* we need to have two buffers to compare */
  if (videorate->prevbuf == NULL) {
    gst_video_rate_swap_prev (videorate, buffer, intime);
    videorate->in++;
    if (!GST_CLOCK_TIME_IS_VALID (videorate->next_ts)) {
      /* new buffer, we expect to output a buffer that matches the first
       * timestamp in the segment */
      if (videorate->skip_to_first || skip) {
        videorate->next_ts = intime;
        videorate->base_ts = in_ts - videorate->segment.start;
        videorate->out_frame_count = 0;
      } else {
        videorate->next_ts = videorate->segment.start + videorate->segment.base;
      }
    }
  } else {
    GstClockTime prevtime;
    gint count = 0;
    gint64 diff1, diff2;

    prevtime = videorate->prev_ts;

    GST_LOG_OBJECT (videorate,
        "BEGINNING prev buf %" GST_TIME_FORMAT " new buf %" GST_TIME_FORMAT
        " outgoing ts %" GST_TIME_FORMAT, GST_TIME_ARGS (prevtime),
        GST_TIME_ARGS (intime), GST_TIME_ARGS (videorate->next_ts));

    videorate->in++;

    /* drop new buffer if it's before previous one */
    if (intime < prevtime) {
      GST_DEBUG_OBJECT (videorate,
          "The new buffer (%" GST_TIME_FORMAT
          ") is before the previous buffer (%"
          GST_TIME_FORMAT "). Dropping new buffer.",
          GST_TIME_ARGS (intime), GST_TIME_ARGS (prevtime));
      videorate->drop++;
      if (!videorate->silent)
        gst_video_rate_notify_drop (videorate);
      gst_buffer_unref (buffer);
      goto done;
    }

    /* got 2 buffers, see which one is the best */
    do {

      diff1 = prevtime - videorate->next_ts;
      diff2 = intime - videorate->next_ts;

      /* take absolute values, beware: abs and ABS don't work for gint64 */
      if (diff1 < 0)
        diff1 = -diff1;
      if (diff2 < 0)
        diff2 = -diff2;

      GST_LOG_OBJECT (videorate,
          "diff with prev %" GST_TIME_FORMAT " diff with new %"
          GST_TIME_FORMAT " outgoing ts %" GST_TIME_FORMAT,
          GST_TIME_ARGS (diff1), GST_TIME_ARGS (diff2),
          GST_TIME_ARGS (videorate->next_ts));

      /* output first one when it's the best */
      if (diff1 <= diff2) {
        count++;

        /* on error the _flush function posted a warning already */
        if ((res =
                gst_video_rate_flush_prev (videorate,
                    count > 1)) != GST_FLOW_OK) {
          gst_buffer_unref (buffer);
          goto done;
        }
      }

      /* Do not produce any dups. We can exit loop now */
      if (videorate->drop_only)
        break;
      /* continue while the first one was the best, if they were equal avoid
       * going into an infinite loop */
    }
    while (diff1 < diff2);

    /* if we outputted the first buffer more than once, we have dups */
    if (count > 1) {
      videorate->dup += count - 1;
      if (!videorate->silent)
        gst_video_rate_notify_duplicate (videorate);
    }
    /* if we didn't output the first buffer, we have a drop */
    else if (count == 0) {
      videorate->drop++;

      if (!videorate->silent)
        gst_video_rate_notify_drop (videorate);

      GST_LOG_OBJECT (videorate,
          "new is best, old never used, drop, outgoing ts %"
          GST_TIME_FORMAT, GST_TIME_ARGS (videorate->next_ts));
    }
    GST_LOG_OBJECT (videorate,
        "END, putting new in old, diff1 %" GST_TIME_FORMAT
        ", diff2 %" GST_TIME_FORMAT ", next_ts %" GST_TIME_FORMAT
        ", in %" G_GUINT64_FORMAT ", out %" G_GUINT64_FORMAT ", drop %"
        G_GUINT64_FORMAT ", dup %" G_GUINT64_FORMAT, GST_TIME_ARGS (diff1),
        GST_TIME_ARGS (diff2), GST_TIME_ARGS (videorate->next_ts),
        videorate->in, videorate->out, videorate->drop, videorate->dup);

    /* swap in new one when it's the best */
    gst_video_rate_swap_prev (videorate, buffer, intime);
  }
done:
  return res;

  /* ERRORS */
not_negotiated:
  {
    GST_WARNING_OBJECT (videorate, "no framerate negotiated");
    gst_buffer_unref (buffer);
    res = GST_FLOW_NOT_NEGOTIATED;
    goto done;
  }

invalid_buffer:
  {
    GST_WARNING_OBJECT (videorate,
        "Got buffer with GST_CLOCK_TIME_NONE timestamp, discarding it");
    gst_buffer_unref (buffer);
    goto done;
  }
}

static void
gst_video_rate_set_property (GObject * object,
    guint prop_id, const GValue * value, GParamSpec * pspec)
{
  GstVideoRate *videorate = GST_VIDEO_RATE (object);

  GST_OBJECT_LOCK (videorate);
  switch (prop_id) {
    case ARG_SILENT:
      videorate->silent = g_value_get_boolean (value);
      break;
    case ARG_NEW_PREF:
      videorate->new_pref = g_value_get_double (value);
      break;
    case ARG_SKIP_TO_FIRST:
      videorate->skip_to_first = g_value_get_boolean (value);
      break;
    case ARG_DROP_ONLY:
      videorate->drop_only = g_value_get_boolean (value);
      break;
    case ARG_AVERAGE_PERIOD:
      videorate->average_period = g_value_get_uint64 (value);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
  GST_OBJECT_UNLOCK (videorate);
}

static void
gst_video_rate_get_property (GObject * object,
    guint prop_id, GValue * value, GParamSpec * pspec)
{
  GstVideoRate *videorate = GST_VIDEO_RATE (object);

  GST_OBJECT_LOCK (videorate);
  switch (prop_id) {
    case ARG_IN:
      g_value_set_uint64 (value, videorate->in);
      break;
    case ARG_OUT:
      g_value_set_uint64 (value, videorate->out);
      break;
    case ARG_DUP:
      g_value_set_uint64 (value, videorate->dup);
      break;
    case ARG_DROP:
      g_value_set_uint64 (value, videorate->drop);
      break;
    case ARG_SILENT:
      g_value_set_boolean (value, videorate->silent);
      break;
    case ARG_NEW_PREF:
      g_value_set_double (value, videorate->new_pref);
      break;
    case ARG_SKIP_TO_FIRST:
      g_value_set_boolean (value, videorate->skip_to_first);
      break;
    case ARG_DROP_ONLY:
      g_value_set_boolean (value, videorate->drop_only);
      break;
    case ARG_AVERAGE_PERIOD:
      g_value_set_uint64 (value, videorate->average_period);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
  GST_OBJECT_UNLOCK (videorate);
}
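
/* mark a discontinuity when starting up and reset all state again when
 * going back to READY */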
static GstStateChangeReturn
gst_video_rate_change_state (GstElement * element, GstStateChange transition)
{
  GstStateChangeReturn ret;
  GstVideoRate *videorate;

  videorate = GST_VIDEO_RATE (element);

  switch (transition) {
    case GST_STATE_CHANGE_READY_TO_PAUSED:
      videorate->discont = TRUE;
      videorate->last_ts = -1;
      break;
    default:
      break;
  }

  ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);

  switch (transition) {
    case GST_STATE_CHANGE_PAUSED_TO_READY:
      gst_video_rate_reset (videorate);
      break;
    default:
      break;
  }

  return ret;
}

static gboolean
plugin_init (GstPlugin * plugin)
{
  GST_DEBUG_CATEGORY_INIT (video_rate_debug, "videorate", 0,
      "VideoRate stream fixer");

  return gst_element_register (plugin, "videorate", GST_RANK_NONE,
      GST_TYPE_VIDEO_RATE);
}

GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
    GST_VERSION_MINOR,
    "videorate",
    "Adjusts video frames",
    plugin_init, VERSION, GST_LICENSE, GST_PACKAGE_NAME, GST_PACKAGE_ORIGIN)