videomark/detect: port to 1.0

This commit is contained in:
David Schleef 2013-04-13 15:16:04 -07:00
parent 4cfa3f5af3
commit b3ff0630e8
6 changed files with 649 additions and 691 deletions

View file

@ -2,12 +2,11 @@ plugin_LTLIBRARIES = libgstvideosignal.la
libgstvideosignal_la_SOURCES = gstvideosignal.c \
gstvideoanalyse.c \
gstvideoanalyse.h
# gstvideodetect.c \
# gstvideodetect.h \
# gstvideomark.c \
# gstvideomark.h
gstvideoanalyse.h \
gstvideodetect.c \
gstvideodetect.h \
gstvideomark.c \
gstvideomark.h
libgstvideosignal_la_CFLAGS = $(GST_CFLAGS) $(GST_BASE_CFLAGS) \
$(GST_PLUGINS_BASE_CFLAGS)

View file

@ -13,10 +13,9 @@
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
* Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
* Boston, MA 02110-1335, USA.
*/
/**
* SECTION:element-videodetect
* @see_also: #GstVideoMark
@ -79,13 +78,6 @@
* <listitem>
* <para>
* #guint64
* <classname>&quot;data-uint64&quot;</classname>:
* the data-pattern found after the pattern or 0 when have-signal is #FALSE.
* </para>
* </listitem>
* <listitem>
* <para>
* #guint
* <classname>&quot;data&quot;</classname>:
* the data-pattern found after the pattern or 0 when have-signal is #FALSE.
* </para>
@ -106,24 +98,31 @@
#include "config.h"
#endif
#include <gst/gst.h>
#include <gst/video/video.h>
#include <gst/video/gstvideofilter.h>
#include "gstvideodetect.h"
#include <string.h>
#include <math.h>
GST_DEBUG_CATEGORY_STATIC (gst_video_detect_debug_category);
#define GST_CAT_DEFAULT gst_video_detect_debug_category
#include <gst/video/video.h>
/* prototypes */
/* GstVideoDetect signals and args */
#define DEFAULT_MESSAGE TRUE
#define DEFAULT_PATTERN_WIDTH 4
#define DEFAULT_PATTERN_HEIGHT 16
#define DEFAULT_PATTERN_COUNT 4
#define DEFAULT_PATTERN_DATA_COUNT 5
#define DEFAULT_PATTERN_CENTER 0.5
#define DEFAULT_PATTERN_SENSITIVITY 0.3
#define DEFAULT_LEFT_OFFSET 0
#define DEFAULT_BOTTOM_OFFSET 0
static void gst_video_detect_set_property (GObject * object,
guint property_id, const GValue * value, GParamSpec * pspec);
static void gst_video_detect_get_property (GObject * object,
guint property_id, GValue * value, GParamSpec * pspec);
static void gst_video_detect_dispose (GObject * object);
static void gst_video_detect_finalize (GObject * object);
static gboolean gst_video_detect_start (GstBaseTransform * trans);
static gboolean gst_video_detect_stop (GstBaseTransform * trans);
static gboolean gst_video_detect_set_info (GstVideoFilter * filter,
GstCaps * incaps, GstVideoInfo * in_info, GstCaps * outcaps,
GstVideoInfo * out_info);
static GstFlowReturn gst_video_detect_transform_frame_ip (GstVideoFilter *
filter, GstVideoFrame * frame);
enum
{
@ -139,328 +138,62 @@ enum
PROP_BOTTOM_OFFSET
};
GST_DEBUG_CATEGORY_STATIC (video_detect_debug);
#define GST_CAT_DEFAULT video_detect_debug
#define DEFAULT_MESSAGE TRUE
#define DEFAULT_PATTERN_WIDTH 4
#define DEFAULT_PATTERN_HEIGHT 16
#define DEFAULT_PATTERN_COUNT 4
#define DEFAULT_PATTERN_DATA_COUNT 5
#define DEFAULT_PATTERN_CENTER 0.5
#define DEFAULT_PATTERN_SENSITIVITY 0.3
#define DEFAULT_LEFT_OFFSET 0
#define DEFAULT_BOTTOM_OFFSET 0
static GstStaticPadTemplate gst_video_detect_src_template =
GST_STATIC_PAD_TEMPLATE ("src",
GST_PAD_SRC,
GST_PAD_ALWAYS,
GST_STATIC_CAPS (GST_VIDEO_CAPS_YUV
("{ I420, YV12, Y41B, Y42B, Y444, YUY2, UYVY, AYUV, YVYU }"))
);
/* pad templates */
static GstStaticPadTemplate gst_video_detect_sink_template =
GST_STATIC_PAD_TEMPLATE ("sink",
GST_PAD_SINK,
GST_PAD_ALWAYS,
GST_STATIC_CAPS (GST_VIDEO_CAPS_YUV
("{ I420, YV12, Y41B, Y42B, Y444, YUY2, UYVY, AYUV, YVYU }"))
);
#define VIDEO_CAPS \
GST_VIDEO_CAPS_MAKE( \
"{ I420, YV12, Y41B, Y42B, Y444, YUY2, UYVY, AYUV, YVYU }")
static GstVideoFilterClass *parent_class = NULL;
static gboolean
gst_video_detect_set_caps (GstBaseTransform * btrans, GstCaps * incaps,
GstCaps * outcaps)
{
GstVideoDetect *vf;
GstStructure *in_s;
guint32 fourcc;
gboolean ret;
/* class initialization */
vf = GST_VIDEO_DETECT (btrans);
in_s = gst_caps_get_structure (incaps, 0);
ret = gst_structure_get_int (in_s, "width", &vf->width);
ret &= gst_structure_get_int (in_s, "height", &vf->height);
ret &= gst_structure_get_fourcc (in_s, "format", &fourcc);
if (ret)
vf->format = gst_video_format_from_fourcc (fourcc);
return ret;
}
G_DEFINE_TYPE_WITH_CODE (GstVideoDetect, gst_video_detect,
GST_TYPE_VIDEO_FILTER,
GST_DEBUG_CATEGORY_INIT (gst_video_detect_debug_category, "videodetect", 0,
"debug category for videodetect element"));
static void
gst_video_detect_post_message (GstVideoDetect * videodetect, GstBuffer * buffer,
guint64 data)
gst_video_detect_class_init (GstVideoDetectClass * klass)
{
GstBaseTransform *trans;
GstMessage *m;
guint64 duration, timestamp, running_time, stream_time;
GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
GstBaseTransformClass *base_transform_class =
GST_BASE_TRANSFORM_CLASS (klass);
GstVideoFilterClass *video_filter_class = GST_VIDEO_FILTER_CLASS (klass);
trans = GST_BASE_TRANSFORM_CAST (videodetect);
gst_element_class_add_pad_template (GST_ELEMENT_CLASS (klass),
gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS,
gst_caps_from_string (VIDEO_CAPS)));
gst_element_class_add_pad_template (GST_ELEMENT_CLASS (klass),
gst_pad_template_new ("sink", GST_PAD_SINK, GST_PAD_ALWAYS,
gst_caps_from_string (VIDEO_CAPS)));
/* get timestamps */
timestamp = GST_BUFFER_TIMESTAMP (buffer);
duration = GST_BUFFER_DURATION (buffer);
running_time = gst_segment_to_running_time (&trans->segment, GST_FORMAT_TIME,
timestamp);
stream_time = gst_segment_to_stream_time (&trans->segment, GST_FORMAT_TIME,
timestamp);
/* post message */
m = gst_message_new_element (GST_OBJECT_CAST (videodetect),
gst_structure_new ("GstVideoDetect",
"have-pattern", G_TYPE_BOOLEAN, videodetect->in_pattern,
"timestamp", G_TYPE_UINT64, timestamp,
"stream-time", G_TYPE_UINT64, stream_time,
"running-time", G_TYPE_UINT64, running_time,
"duration", G_TYPE_UINT64, duration,
"data-uint64", G_TYPE_UINT64, data,
"data", G_TYPE_UINT, (guint) MIN (data, G_MAXINT), NULL));
gst_element_post_message (GST_ELEMENT_CAST (videodetect), m);
}
/* Compute the average brightness of a pw x ph sample box.
 * Samples every pixel_stride-th byte of each row (one 8-bit component per
 * pixel) and advances by row_stride between rows.
 * Returns the mean normalised to 0.0 (all black) .. 1.0 (all white). */
static gdouble
gst_video_detect_calc_brightness (GstVideoDetect * videodetect, guint8 * data,
gint width, gint height, gint row_stride, gint pixel_stride)
{
gint i, j;
guint64 sum;
sum = 0;
for (i = 0; i < height; i++) {
for (j = 0; j < width; j++) {
sum += data[pixel_stride * j];
}
/* step to the next row of the box */
data += row_stride;
}
/* 255 is the maximum value of a single 8-bit sample */
return sum / (255.0 * width * height);
}
/* Scan the bottom-left corner of @buffer for the blink pattern.
 * The pattern is pattern_count alternating black/white boxes of
 * pattern_width x pattern_height pixels, followed by pattern_data_count
 * boxes that encode a data payload (black = 0, white = 1, MSB first).
 * Posts an element message on every successful detection and once when a
 * previously-seen pattern disappears. */
static void
gst_video_detect_yuv (GstVideoDetect * videodetect, GstBuffer * buffer)
{
GstVideoFormat format;
gdouble brightness;
gint i, pw, ph, row_stride, pixel_stride, offset;
gint width, height, req_width, req_height;
guint8 *d, *data;
guint64 pattern_data;
data = GST_BUFFER_DATA (buffer);
format = videodetect->format;
width = videodetect->width;
height = videodetect->height;
pw = videodetect->pattern_width;
ph = videodetect->pattern_height;
/* component 0: the luma plane for the YUV formats this element accepts */
row_stride = gst_video_format_get_row_stride (format, 0, width);
pixel_stride = gst_video_format_get_pixel_stride (format, 0);
offset = gst_video_format_get_component_offset (format, 0, width, height);
/* the full marker (sync + data boxes) must fit inside the frame */
req_width =
(videodetect->pattern_count + videodetect->pattern_data_count) * pw +
videodetect->left_offset;
req_height = videodetect->bottom_offset + ph;
if (req_width > width || req_height > height) {
goto no_pattern;
}
/* analyse the bottom left pixels */
for (i = 0; i < videodetect->pattern_count; i++) {
d = data + offset;
/* move to start of bottom left, adjust for offsets */
d += row_stride * (height - ph - videodetect->bottom_offset) +
pixel_stride * videodetect->left_offset;
/* move to i-th pattern */
d += pixel_stride * pw * i;
/* calc brightness of width * height box */
brightness = gst_video_detect_calc_brightness (videodetect, d, pw, ph,
row_stride, pixel_stride);
GST_DEBUG_OBJECT (videodetect, "brightness %f", brightness);
if (i & 1) {
/* odd pixels must be white, all pixels darker than the center +
* sensitivity are considered wrong. */
if (brightness <
(videodetect->pattern_center + videodetect->pattern_sensitivity))
goto no_pattern;
} else {
/* even pixels must be black, pixels lighter than the center - sensitivity
* are considered wrong. */
if (brightness >
(videodetect->pattern_center - videodetect->pattern_sensitivity))
goto no_pattern;
}
}
GST_DEBUG_OBJECT (videodetect, "found pattern");
pattern_data = 0;
/* get the data of the pattern */
for (i = 0; i < videodetect->pattern_data_count; i++) {
d = data + offset;
/* move to start of bottom left, adjust for offsets */
d += row_stride * (height - ph - videodetect->bottom_offset) +
pixel_stride * videodetect->left_offset;
/* move after the fixed pattern */
d += pixel_stride * (videodetect->pattern_count * pw);
/* move to i-th pattern data */
d += pixel_stride * pw * i;
/* calc brightness of width * height box */
brightness = gst_video_detect_calc_brightness (videodetect, d, pw, ph,
row_stride, pixel_stride);
/* update pattern, we just use the center to decide between black and white. */
pattern_data <<= 1;
if (brightness > videodetect->pattern_center)
pattern_data |= 1;
}
GST_DEBUG_OBJECT (videodetect, "have data %" G_GUINT64_FORMAT, pattern_data);
videodetect->in_pattern = TRUE;
gst_video_detect_post_message (videodetect, buffer, pattern_data);
return;
no_pattern:
{
GST_DEBUG_OBJECT (videodetect, "no pattern found");
/* only report the loss once per pattern disappearance */
if (videodetect->in_pattern) {
videodetect->in_pattern = FALSE;
gst_video_detect_post_message (videodetect, buffer, 0);
}
return;
}
}
/* GstBaseTransform::transform_ip — run pattern detection on the buffer.
 * The buffer itself is never modified; this vfunc cannot fail. */
static GstFlowReturn
gst_video_detect_transform_ip (GstBaseTransform * trans, GstBuffer * buf)
{
  gst_video_detect_yuv (GST_VIDEO_DETECT (trans), buf);

  return GST_FLOW_OK;
}
/* GObject::set_property — store a new value for one of the detection
 * properties.  No locking is taken here; values are read again on the
 * next processed buffer. */
static void
gst_video_detect_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec)
{
GstVideoDetect *videodetect;
videodetect = GST_VIDEO_DETECT (object);
switch (prop_id) {
case PROP_MESSAGE:
videodetect->message = g_value_get_boolean (value);
break;
case PROP_PATTERN_WIDTH:
videodetect->pattern_width = g_value_get_int (value);
break;
case PROP_PATTERN_HEIGHT:
videodetect->pattern_height = g_value_get_int (value);
break;
case PROP_PATTERN_COUNT:
videodetect->pattern_count = g_value_get_int (value);
break;
case PROP_PATTERN_DATA_COUNT:
videodetect->pattern_data_count = g_value_get_int (value);
break;
case PROP_PATTERN_CENTER:
videodetect->pattern_center = g_value_get_double (value);
break;
case PROP_PATTERN_SENSITIVITY:
videodetect->pattern_sensitivity = g_value_get_double (value);
break;
case PROP_LEFT_OFFSET:
videodetect->left_offset = g_value_get_int (value);
break;
case PROP_BOTTOM_OFFSET:
videodetect->bottom_offset = g_value_get_int (value);
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
/* GObject::get_property — return the current value of one of the
 * detection properties. */
static void
gst_video_detect_get_property (GObject * object, guint prop_id, GValue * value,
GParamSpec * pspec)
{
GstVideoDetect *videodetect;
videodetect = GST_VIDEO_DETECT (object);
switch (prop_id) {
case PROP_MESSAGE:
g_value_set_boolean (value, videodetect->message);
break;
case PROP_PATTERN_WIDTH:
g_value_set_int (value, videodetect->pattern_width);
break;
case PROP_PATTERN_HEIGHT:
g_value_set_int (value, videodetect->pattern_height);
break;
case PROP_PATTERN_COUNT:
g_value_set_int (value, videodetect->pattern_count);
break;
case PROP_PATTERN_DATA_COUNT:
g_value_set_int (value, videodetect->pattern_data_count);
break;
case PROP_PATTERN_CENTER:
g_value_set_double (value, videodetect->pattern_center);
break;
case PROP_PATTERN_SENSITIVITY:
g_value_set_double (value, videodetect->pattern_sensitivity);
break;
case PROP_LEFT_OFFSET:
g_value_set_int (value, videodetect->left_offset);
break;
case PROP_BOTTOM_OFFSET:
g_value_set_int (value, videodetect->bottom_offset);
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
static void
gst_video_detect_base_init (gpointer g_class)
{
GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
gst_element_class_set_static_metadata (element_class, "Video detecter",
"Filter/Effect/Video",
gst_element_class_set_static_metadata (GST_ELEMENT_CLASS (klass),
"Video detecter", "Filter/Effect/Video",
"Detect patterns in a video signal", "Wim Taymans <wim@fluendo.com>");
gst_element_class_add_pad_template (element_class,
gst_static_pad_template_get (&gst_video_detect_sink_template));
gst_element_class_add_pad_template (element_class,
gst_static_pad_template_get (&gst_video_detect_src_template));
}
static void
gst_video_detect_class_init (gpointer klass, gpointer class_data)
{
GObjectClass *gobject_class;
GstBaseTransformClass *trans_class;
gobject_class = (GObjectClass *) klass;
trans_class = (GstBaseTransformClass *) klass;
parent_class = g_type_class_peek_parent (klass);
gobject_class->set_property = gst_video_detect_set_property;
gobject_class->get_property = gst_video_detect_get_property;
gobject_class->dispose = gst_video_detect_dispose;
gobject_class->finalize = gst_video_detect_finalize;
base_transform_class->start = GST_DEBUG_FUNCPTR (gst_video_detect_start);
base_transform_class->stop = GST_DEBUG_FUNCPTR (gst_video_detect_stop);
video_filter_class->set_info = GST_DEBUG_FUNCPTR (gst_video_detect_set_info);
video_filter_class->transform_frame_ip =
GST_DEBUG_FUNCPTR (gst_video_detect_transform_frame_ip);
g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_MESSAGE,
g_param_spec_boolean ("message", "Message",
"Post statics messages",
"Post detected data as bus messages",
DEFAULT_MESSAGE,
G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_PATTERN_WIDTH,
@ -503,47 +236,304 @@ gst_video_detect_class_init (gpointer klass, gpointer class_data)
"The offset from the bottom border where the pattern starts", 0,
G_MAXINT, DEFAULT_BOTTOM_OFFSET,
G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
trans_class->set_caps = GST_DEBUG_FUNCPTR (gst_video_detect_set_caps);
trans_class->transform_ip = GST_DEBUG_FUNCPTR (gst_video_detect_transform_ip);
trans_class->passthrough_on_same_caps = TRUE;
GST_DEBUG_CATEGORY_INIT (video_detect_debug, "videodetect", 0,
"Video detect");
}
static void
gst_video_detect_init (GTypeInstance * instance, gpointer g_class)
gst_video_detect_init (GstVideoDetect * videodetect)
{
GstVideoDetect *videodetect;
videodetect = GST_VIDEO_DETECT (instance);
GST_DEBUG_OBJECT (videodetect, "gst_video_detect_init");
videodetect->in_pattern = FALSE;
}
GType
gst_video_detect_get_type (void)
void
gst_video_detect_set_property (GObject * object, guint property_id,
const GValue * value, GParamSpec * pspec)
{
static GType video_detect_type = 0;
GstVideoDetect *videodetect = GST_VIDEO_DETECT (object);
if (!video_detect_type) {
static const GTypeInfo video_detect_info = {
sizeof (GstVideoDetectClass),
gst_video_detect_base_init,
NULL,
gst_video_detect_class_init,
NULL,
NULL,
sizeof (GstVideoDetect),
0,
gst_video_detect_init,
};
GST_DEBUG_OBJECT (videodetect, "set_property");
video_detect_type = g_type_register_static (GST_TYPE_VIDEO_FILTER,
"GstVideoDetect", &video_detect_info, 0);
switch (property_id) {
case PROP_MESSAGE:
videodetect->message = g_value_get_boolean (value);
break;
case PROP_PATTERN_WIDTH:
videodetect->pattern_width = g_value_get_int (value);
break;
case PROP_PATTERN_HEIGHT:
videodetect->pattern_height = g_value_get_int (value);
break;
case PROP_PATTERN_COUNT:
videodetect->pattern_count = g_value_get_int (value);
break;
case PROP_PATTERN_DATA_COUNT:
videodetect->pattern_data_count = g_value_get_int (value);
break;
case PROP_PATTERN_CENTER:
videodetect->pattern_center = g_value_get_double (value);
break;
case PROP_PATTERN_SENSITIVITY:
videodetect->pattern_sensitivity = g_value_get_double (value);
break;
case PROP_LEFT_OFFSET:
videodetect->left_offset = g_value_get_int (value);
break;
case PROP_BOTTOM_OFFSET:
videodetect->bottom_offset = g_value_get_int (value);
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
break;
}
return video_detect_type;
}
/* GObject::get_property — return the current value of one of the
 * detection properties.  Marked static to match the prototype at the
 * top of the file (the definition previously omitted the keyword). */
static void
gst_video_detect_get_property (GObject * object, guint property_id,
    GValue * value, GParamSpec * pspec)
{
  GstVideoDetect *videodetect = GST_VIDEO_DETECT (object);

  GST_DEBUG_OBJECT (videodetect, "get_property");

  switch (property_id) {
    case PROP_MESSAGE:
      g_value_set_boolean (value, videodetect->message);
      break;
    case PROP_PATTERN_WIDTH:
      g_value_set_int (value, videodetect->pattern_width);
      break;
    case PROP_PATTERN_HEIGHT:
      g_value_set_int (value, videodetect->pattern_height);
      break;
    case PROP_PATTERN_COUNT:
      g_value_set_int (value, videodetect->pattern_count);
      break;
    case PROP_PATTERN_DATA_COUNT:
      g_value_set_int (value, videodetect->pattern_data_count);
      break;
    case PROP_PATTERN_CENTER:
      g_value_set_double (value, videodetect->pattern_center);
      break;
    case PROP_PATTERN_SENSITIVITY:
      g_value_set_double (value, videodetect->pattern_sensitivity);
      break;
    case PROP_LEFT_OFFSET:
      g_value_set_int (value, videodetect->left_offset);
      break;
    case PROP_BOTTOM_OFFSET:
      g_value_set_int (value, videodetect->bottom_offset);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
      break;
  }
}
/* GObject::dispose — may run more than once; this element holds no
 * object references of its own, so it only chains up.  Marked static to
 * match the prototype at the top of the file. */
static void
gst_video_detect_dispose (GObject * object)
{
  GstVideoDetect *videodetect = GST_VIDEO_DETECT (object);

  GST_DEBUG_OBJECT (videodetect, "dispose");

  /* clean up as possible. may be called multiple times */

  G_OBJECT_CLASS (gst_video_detect_parent_class)->dispose (object);
}
/* GObject::finalize — no dynamically allocated state to release, so it
 * only chains up.  Marked static to match the prototype at the top of
 * the file. */
static void
gst_video_detect_finalize (GObject * object)
{
  GstVideoDetect *videodetect = GST_VIDEO_DETECT (object);

  GST_DEBUG_OBJECT (videodetect, "finalize");

  /* clean up object here */

  G_OBJECT_CLASS (gst_video_detect_parent_class)->finalize (object);
}
/* GstBaseTransform::start — nothing to set up; always succeeds. */
static gboolean
gst_video_detect_start (GstBaseTransform * trans)
{
  GST_DEBUG_OBJECT (GST_VIDEO_DETECT (trans), "start");

  return TRUE;
}
/* GstBaseTransform::stop — nothing to tear down; always succeeds. */
static gboolean
gst_video_detect_stop (GstBaseTransform * trans)
{
  GST_DEBUG_OBJECT (GST_VIDEO_DETECT (trans), "stop");

  return TRUE;
}
/* GstVideoFilter::set_info — caps are accepted as-is; per-frame geometry
 * is read from the GstVideoFrame at transform time, so nothing is cached
 * here. */
static gboolean
gst_video_detect_set_info (GstVideoFilter * filter, GstCaps * incaps,
    GstVideoInfo * in_info, GstCaps * outcaps, GstVideoInfo * out_info)
{
  GST_DEBUG_OBJECT (GST_VIDEO_DETECT (filter), "set_info");

  return TRUE;
}
/* Post a "GstVideoDetect" element message on the bus describing the
 * current detection state.
 * @buffer: buffer whose timestamp/duration are copied into the message.
 * @data: the decoded pattern payload, or 0 when the pattern was lost
 * (have-pattern carries videodetect->in_pattern, set by the caller).
 * NOTE(review): the "message" property is not consulted here, so messages
 * are posted unconditionally -- confirm this is intended. */
static void
gst_video_detect_post_message (GstVideoDetect * videodetect, GstBuffer * buffer,
guint64 data)
{
GstBaseTransform *trans;
GstMessage *m;
guint64 duration, timestamp, running_time, stream_time;
trans = GST_BASE_TRANSFORM_CAST (videodetect);
/* get timestamps */
timestamp = GST_BUFFER_TIMESTAMP (buffer);
duration = GST_BUFFER_DURATION (buffer);
/* translate the buffer timestamp into both running and stream time using
 * the segment stored on the base transform */
running_time = gst_segment_to_running_time (&trans->segment, GST_FORMAT_TIME,
timestamp);
stream_time = gst_segment_to_stream_time (&trans->segment, GST_FORMAT_TIME,
timestamp);
/* post message */
m = gst_message_new_element (GST_OBJECT_CAST (videodetect),
gst_structure_new ("GstVideoDetect",
"have-pattern", G_TYPE_BOOLEAN, videodetect->in_pattern,
"timestamp", G_TYPE_UINT64, timestamp,
"stream-time", G_TYPE_UINT64, stream_time,
"running-time", G_TYPE_UINT64, running_time,
"duration", G_TYPE_UINT64, duration,
"data", G_TYPE_UINT64, data, NULL));
gst_element_post_message (GST_ELEMENT_CAST (videodetect), m);
}
/* Compute the average brightness of a width x height sample box.
 * Samples every pixel_stride-th byte of each row (one 8-bit component
 * per pixel); row_stride is the distance between successive rows.
 * Returns the mean normalised to 0.0 (all black) .. 1.0 (all white). */
static gdouble
gst_video_detect_calc_brightness (GstVideoDetect * videodetect, guint8 * data,
    gint width, gint height, gint row_stride, gint pixel_stride)
{
  gint row, col;
  guint64 total = 0;

  for (row = 0; row < height; row++) {
    guint8 *line = data + row * row_stride;

    for (col = 0; col < width; col++)
      total += line[pixel_stride * col];
  }

  /* 255 is the maximum value of a single 8-bit sample */
  return total / (255.0 * width * height);
}
/* Scan the bottom-left corner of @frame for the blink pattern.
 * The pattern is pattern_count alternating black/white boxes of
 * pattern_width x pattern_height pixels, followed by pattern_data_count
 * boxes encoding a data payload (black = 0, white = 1, MSB first).
 * Posts an element message on every detection and once when a
 * previously-seen pattern disappears.
 * NOTE(review): the "message" property is never checked before posting --
 * confirm intended. */
static void
gst_video_detect_yuv (GstVideoDetect * videodetect, GstVideoFrame * frame)
{
gdouble brightness;
gint i, pw, ph, row_stride, pixel_stride;
gint width, height, req_width, req_height;
guint8 *d;
guint64 pattern_data;
width = frame->info.width;
height = frame->info.height;
pw = videodetect->pattern_width;
ph = videodetect->pattern_height;
/* component 0: the luma plane for the YUV formats this element accepts */
row_stride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
pixel_stride = GST_VIDEO_FRAME_COMP_PSTRIDE (frame, 0);
/* the full marker (sync + data boxes) must fit inside the frame */
req_width =
(videodetect->pattern_count + videodetect->pattern_data_count) * pw +
videodetect->left_offset;
req_height = videodetect->bottom_offset + ph;
if (req_width > width || req_height > height) {
goto no_pattern;
}
/* analyse the bottom left pixels */
for (i = 0; i < videodetect->pattern_count; i++) {
d = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
/* move to start of bottom left, adjust for offsets */
d += row_stride * (height - ph - videodetect->bottom_offset) +
pixel_stride * videodetect->left_offset;
/* move to i-th pattern */
d += pixel_stride * pw * i;
/* calc brightness of width * height box */
brightness = gst_video_detect_calc_brightness (videodetect, d, pw, ph,
row_stride, pixel_stride);
GST_DEBUG_OBJECT (videodetect, "brightness %f", brightness);
if (i & 1) {
/* odd pixels must be white, all pixels darker than the center +
* sensitivity are considered wrong. */
if (brightness <
(videodetect->pattern_center + videodetect->pattern_sensitivity))
goto no_pattern;
} else {
/* even pixels must be black, pixels lighter than the center - sensitivity
* are considered wrong. */
if (brightness >
(videodetect->pattern_center - videodetect->pattern_sensitivity))
goto no_pattern;
}
}
GST_DEBUG_OBJECT (videodetect, "found pattern");
pattern_data = 0;
/* get the data of the pattern */
for (i = 0; i < videodetect->pattern_data_count; i++) {
d = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
/* move to start of bottom left, adjust for offsets */
d += row_stride * (height - ph - videodetect->bottom_offset) +
pixel_stride * videodetect->left_offset;
/* move after the fixed pattern */
d += pixel_stride * (videodetect->pattern_count * pw);
/* move to i-th pattern data */
d += pixel_stride * pw * i;
/* calc brightness of width * height box */
brightness = gst_video_detect_calc_brightness (videodetect, d, pw, ph,
row_stride, pixel_stride);
/* update pattern, we just use the center to decide between black and white. */
pattern_data <<= 1;
if (brightness > videodetect->pattern_center)
pattern_data |= 1;
}
GST_DEBUG_OBJECT (videodetect, "have data %" G_GUINT64_FORMAT, pattern_data);
videodetect->in_pattern = TRUE;
gst_video_detect_post_message (videodetect, frame->buffer, pattern_data);
return;
no_pattern:
{
GST_DEBUG_OBJECT (videodetect, "no pattern found");
/* only report the loss once per pattern disappearance */
if (videodetect->in_pattern) {
videodetect->in_pattern = FALSE;
gst_video_detect_post_message (videodetect, frame->buffer, 0);
}
return;
}
}
/* GstVideoFilter::transform_frame_ip — run pattern detection on the
 * mapped frame.  The frame data is only read, never modified; this
 * vfunc cannot fail. */
static GstFlowReturn
gst_video_detect_transform_frame_ip (GstVideoFilter * filter,
    GstVideoFrame * frame)
{
  GstVideoDetect *self = GST_VIDEO_DETECT (filter);

  GST_DEBUG_OBJECT (self, "transform_frame_ip");

  gst_video_detect_yuv (self, frame);

  return GST_FLOW_OK;
}

View file

@ -17,38 +17,26 @@
* Boston, MA 02110-1301, USA.
*/
#ifndef __GST_VIDEO_DETECT_H__
#define __GST_VIDEO_DETECT_H__
#ifndef _GST_VIDEO_DETECT_H_
#define _GST_VIDEO_DETECT_H_
#include <gst/video/gstvideofilter.h>
#include <gst/video/video.h>
#include <gst/video/gstvideofilter.h>
G_BEGIN_DECLS
#define GST_TYPE_VIDEO_DETECT \
(gst_video_detect_get_type())
#define GST_VIDEO_DETECT(obj) \
(G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_VIDEO_DETECT,GstVideoDetect))
#define GST_VIDEO_DETECT_CLASS(klass) \
(G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_VIDEO_DETECT,GstVideoDetectClass))
#define GST_IS_VIDEO_DETECT(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_VIDEO_DETECT))
#define GST_IS_VIDEO_DETECT_CLASS(klass) \
(G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_VIDEO_DETECT))
/* Standard GObject type boilerplate.  Fix: GST_IS_VIDEO_DETECT_CLASS
 * previously named its parameter "obj" while the expansion used "klass",
 * so the macro could not compile when used. */
#define GST_TYPE_VIDEO_DETECT (gst_video_detect_get_type())
#define GST_VIDEO_DETECT(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_VIDEO_DETECT,GstVideoDetect))
#define GST_VIDEO_DETECT_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_VIDEO_DETECT,GstVideoDetectClass))
#define GST_IS_VIDEO_DETECT(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_VIDEO_DETECT))
#define GST_IS_VIDEO_DETECT_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_VIDEO_DETECT))
typedef struct _GstVideoDetect GstVideoDetect;
typedef struct _GstVideoDetectClass GstVideoDetectClass;
/**
* GstVideoDetect:
*
* Opaque datastructure.
*/
struct _GstVideoDetect {
GstVideoFilter videofilter;
gint width, height;
GstVideoFormat format;
struct _GstVideoDetect
{
GstVideoFilter base_videodetect;
gboolean message;
gint pattern_width;
@ -63,12 +51,13 @@ struct _GstVideoDetect {
gboolean in_pattern;
};
struct _GstVideoDetectClass {
GstVideoFilterClass parent_class;
struct _GstVideoDetectClass
{
GstVideoFilterClass base_videodetect_class;
};
GType gst_video_detect_get_type (void);
G_END_DECLS
#endif /* __GST_VIDEO_DETECT_H__ */
#endif

View file

@ -13,10 +13,9 @@
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
* Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
* Boston, MA 02110-1335, USA.
*/
/**
* SECTION:element-videomark
* @see_also: #GstVideoDetect
@ -48,14 +47,44 @@
#include "config.h"
#endif
#include <gst/gst.h>
#include <gst/video/video.h>
#include <gst/video/gstvideofilter.h>
#include "gstvideomark.h"
#include <string.h>
#include <math.h>
GST_DEBUG_CATEGORY_STATIC (gst_video_mark_debug_category);
#define GST_CAT_DEFAULT gst_video_mark_debug_category
#include <gst/video/video.h>
/* prototypes */
/* GstVideoMark signals and args */
static void gst_video_mark_set_property (GObject * object,
guint property_id, const GValue * value, GParamSpec * pspec);
static void gst_video_mark_get_property (GObject * object,
guint property_id, GValue * value, GParamSpec * pspec);
static void gst_video_mark_dispose (GObject * object);
static void gst_video_mark_finalize (GObject * object);
static gboolean gst_video_mark_start (GstBaseTransform * trans);
static gboolean gst_video_mark_stop (GstBaseTransform * trans);
static gboolean gst_video_mark_set_info (GstVideoFilter * filter,
GstCaps * incaps, GstVideoInfo * in_info, GstCaps * outcaps,
GstVideoInfo * out_info);
static GstFlowReturn gst_video_mark_transform_frame_ip (GstVideoFilter * filter,
GstVideoFrame * frame);
enum
{
PROP_0,
PROP_PATTERN_WIDTH,
PROP_PATTERN_HEIGHT,
PROP_PATTERN_COUNT,
PROP_PATTERN_DATA_COUNT,
PROP_PATTERN_DATA,
PROP_ENABLED,
PROP_LEFT_OFFSET,
PROP_BOTTOM_OFFSET
};
#define DEFAULT_PATTERN_WIDTH 4
#define DEFAULT_PATTERN_HEIGHT 16
@ -66,62 +95,227 @@
#define DEFAULT_LEFT_OFFSET 0
#define DEFAULT_BOTTOM_OFFSET 0
enum
/* pad templates */
#define VIDEO_CAPS \
GST_VIDEO_CAPS_MAKE( \
"{ I420, YV12, Y41B, Y42B, Y444, YUY2, UYVY, AYUV, YVYU }")
/* class initialization */
G_DEFINE_TYPE_WITH_CODE (GstVideoMark, gst_video_mark, GST_TYPE_VIDEO_FILTER,
GST_DEBUG_CATEGORY_INIT (gst_video_mark_debug_category, "videomark", 0,
"debug category for videomark element"));
static void
gst_video_mark_class_init (GstVideoMarkClass * klass)
{
PROP_0,
PROP_PATTERN_WIDTH,
PROP_PATTERN_HEIGHT,
PROP_PATTERN_COUNT,
PROP_PATTERN_DATA_COUNT,
PROP_PATTERN_DATA,
PROP_PATTERN_DATA_64,
PROP_ENABLED,
PROP_LEFT_OFFSET,
PROP_BOTTOM_OFFSET
};
GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
GstBaseTransformClass *base_transform_class =
GST_BASE_TRANSFORM_CLASS (klass);
GstVideoFilterClass *video_filter_class = GST_VIDEO_FILTER_CLASS (klass);
GST_DEBUG_CATEGORY_STATIC (video_mark_debug);
#define GST_CAT_DEFAULT video_mark_debug
gst_element_class_add_pad_template (GST_ELEMENT_CLASS (klass),
gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS,
gst_caps_from_string (VIDEO_CAPS)));
gst_element_class_add_pad_template (GST_ELEMENT_CLASS (klass),
gst_pad_template_new ("sink", GST_PAD_SINK, GST_PAD_ALWAYS,
gst_caps_from_string (VIDEO_CAPS)));
static GstStaticPadTemplate gst_video_mark_src_template =
GST_STATIC_PAD_TEMPLATE ("src",
GST_PAD_SRC,
GST_PAD_ALWAYS,
GST_STATIC_CAPS (GST_VIDEO_CAPS_YUV
("{ I420, YV12, Y41B, Y42B, Y444, YUY2, UYVY, AYUV, YVYU }"))
);
gst_element_class_set_static_metadata (GST_ELEMENT_CLASS (klass),
"Video marker", "Filter/Effect/Video",
"Marks a video signal with a pattern", "Wim Taymans <wim@fluendo.com>");
static GstStaticPadTemplate gst_video_mark_sink_template =
GST_STATIC_PAD_TEMPLATE ("sink",
GST_PAD_SINK,
GST_PAD_ALWAYS,
GST_STATIC_CAPS (GST_VIDEO_CAPS_YUV
("{ I420, YV12, Y41B, Y42B, Y444, YUY2, UYVY, AYUV, YVYU }"))
);
gobject_class->set_property = gst_video_mark_set_property;
gobject_class->get_property = gst_video_mark_get_property;
gobject_class->dispose = gst_video_mark_dispose;
gobject_class->finalize = gst_video_mark_finalize;
base_transform_class->start = GST_DEBUG_FUNCPTR (gst_video_mark_start);
base_transform_class->stop = GST_DEBUG_FUNCPTR (gst_video_mark_stop);
video_filter_class->set_info = GST_DEBUG_FUNCPTR (gst_video_mark_set_info);
video_filter_class->transform_frame_ip =
GST_DEBUG_FUNCPTR (gst_video_mark_transform_frame_ip);
static GstVideoFilterClass *parent_class = NULL;
g_object_class_install_property (gobject_class, PROP_PATTERN_WIDTH,
g_param_spec_int ("pattern-width", "Pattern width",
"The width of the pattern markers", 1, G_MAXINT,
DEFAULT_PATTERN_WIDTH,
G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_PATTERN_HEIGHT,
g_param_spec_int ("pattern-height", "Pattern height",
"The height of the pattern markers", 1, G_MAXINT,
DEFAULT_PATTERN_HEIGHT,
G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_PATTERN_COUNT,
g_param_spec_int ("pattern-count", "Pattern count",
"The number of pattern markers", 0, G_MAXINT,
DEFAULT_PATTERN_COUNT,
G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_PATTERN_DATA_COUNT,
g_param_spec_int ("pattern-data-count", "Pattern data count",
"The number of extra data pattern markers", 0, 64,
DEFAULT_PATTERN_DATA_COUNT,
G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_PATTERN_DATA,
g_param_spec_uint64 ("pattern-data", "Pattern data",
"The extra data pattern markers", 0, G_MAXUINT64,
DEFAULT_PATTERN_DATA,
G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_ENABLED,
g_param_spec_boolean ("enabled", "Enabled",
"Enable or disable the filter",
DEFAULT_ENABLED,
G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_LEFT_OFFSET,
g_param_spec_int ("left-offset", "Left Offset",
"The offset from the left border where the pattern starts", 0,
G_MAXINT, DEFAULT_LEFT_OFFSET,
G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_BOTTOM_OFFSET,
g_param_spec_int ("bottom-offset", "Bottom Offset",
"The offset from the bottom border where the pattern starts", 0,
G_MAXINT, DEFAULT_BOTTOM_OFFSET,
G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
}
/* Instance init: intentionally empty -- every property is initialised to
 * its default by the G_PARAM_CONSTRUCT flags on the param specs. */
static void
gst_video_mark_init (GstVideoMark * videomark)
{
}
/* GObject::set_property — store a new value for one of the marker
 * properties.  Marked static to match the prototype at the top of the
 * file (the definition previously omitted the keyword). */
static void
gst_video_mark_set_property (GObject * object, guint property_id,
    const GValue * value, GParamSpec * pspec)
{
  GstVideoMark *videomark = GST_VIDEO_MARK (object);

  GST_DEBUG_OBJECT (videomark, "set_property");

  switch (property_id) {
    case PROP_PATTERN_WIDTH:
      videomark->pattern_width = g_value_get_int (value);
      break;
    case PROP_PATTERN_HEIGHT:
      videomark->pattern_height = g_value_get_int (value);
      break;
    case PROP_PATTERN_COUNT:
      videomark->pattern_count = g_value_get_int (value);
      break;
    case PROP_PATTERN_DATA_COUNT:
      videomark->pattern_data_count = g_value_get_int (value);
      break;
    case PROP_PATTERN_DATA:
      videomark->pattern_data = g_value_get_uint64 (value);
      break;
    case PROP_ENABLED:
      videomark->enabled = g_value_get_boolean (value);
      break;
    case PROP_LEFT_OFFSET:
      videomark->left_offset = g_value_get_int (value);
      break;
    case PROP_BOTTOM_OFFSET:
      videomark->bottom_offset = g_value_get_int (value);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
      break;
  }
}
/* GObject property getter: mirrors gst_video_mark_set_property, reading each
 * field back into the supplied GValue. */
void
gst_video_mark_get_property (GObject * object, guint property_id,
    GValue * value, GParamSpec * pspec)
{
  GstVideoMark *self = GST_VIDEO_MARK (object);

  GST_DEBUG_OBJECT (self, "get_property");

  switch (property_id) {
    case PROP_PATTERN_WIDTH:
      g_value_set_int (value, self->pattern_width);
      break;
    case PROP_PATTERN_HEIGHT:
      g_value_set_int (value, self->pattern_height);
      break;
    case PROP_PATTERN_COUNT:
      g_value_set_int (value, self->pattern_count);
      break;
    case PROP_PATTERN_DATA_COUNT:
      g_value_set_int (value, self->pattern_data_count);
      break;
    case PROP_PATTERN_DATA:
      g_value_set_uint64 (value, self->pattern_data);
      break;
    case PROP_ENABLED:
      g_value_set_boolean (value, self->enabled);
      break;
    case PROP_LEFT_OFFSET:
      g_value_set_int (value, self->left_offset);
      break;
    case PROP_BOTTOM_OFFSET:
      g_value_set_int (value, self->bottom_offset);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
      break;
  }
}
/* GObject dispose: drop references to other objects. GObject may invoke this
 * more than once, so it must stay idempotent. */
void
gst_video_mark_dispose (GObject * object)
{
  GstVideoMark *self = GST_VIDEO_MARK (object);

  GST_DEBUG_OBJECT (self, "dispose");

  /* no object references held by this element yet; just chain up */
  G_OBJECT_CLASS (gst_video_mark_parent_class)->dispose (object);
}
/* GObject finalize: last-chance cleanup; runs exactly once per instance. */
void
gst_video_mark_finalize (GObject * object)
{
  GstVideoMark *self = GST_VIDEO_MARK (object);

  GST_DEBUG_OBJECT (self, "finalize");

  /* no dynamically allocated members to free; chain up to the parent */
  G_OBJECT_CLASS (gst_video_mark_parent_class)->finalize (object);
}
/* GstBaseTransform::start — processing is about to begin.
 * Nothing to allocate here: the negotiated format is delivered later through
 * the GstVideoFilter::set_info vfunc, so the old 0.10 set_caps/fourcc parsing
 * that used to live here is gone. (This span previously contained leftover
 * pre-port set_caps lines interleaved with the new code, which did not
 * compile.) */
static gboolean
gst_video_mark_start (GstBaseTransform * trans)
{
  GstVideoMark *videomark = GST_VIDEO_MARK (trans);

  GST_DEBUG_OBJECT (videomark, "start");

  return TRUE;
}

/* GstBaseTransform::stop — processing has ended; no per-stream state to
 * release. */
static gboolean
gst_video_mark_stop (GstBaseTransform * trans)
{
  GstVideoMark *videomark = GST_VIDEO_MARK (trans);

  GST_DEBUG_OBJECT (videomark, "stop");

  return TRUE;
}
/* GstVideoFilter::set_info — called when caps are negotiated. Width, height
 * and strides are read per-frame from the GstVideoFrame, so nothing needs to
 * be cached here; accept unconditionally. */
static gboolean
gst_video_mark_set_info (GstVideoFilter * filter, GstCaps * incaps,
    GstVideoInfo * in_info, GstCaps * outcaps, GstVideoInfo * out_info)
{
  GstVideoMark *self = GST_VIDEO_MARK (filter);

  GST_DEBUG_OBJECT (self, "set_info");

  return TRUE;
}
static void
@ -139,26 +333,21 @@ gst_video_mark_draw_box (GstVideoMark * videomark, guint8 * data,
}
static GstFlowReturn
gst_video_mark_yuv (GstVideoMark * videomark, GstBuffer * buffer)
gst_video_mark_yuv (GstVideoMark * videomark, GstVideoFrame * frame)
{
GstVideoFormat format;
gint i, pw, ph, row_stride, pixel_stride, offset;
gint i, pw, ph, row_stride, pixel_stride;
gint width, height, req_width, req_height;
guint8 *d, *data;
guint8 *d;
guint64 pattern_shift;
guint8 color;
data = GST_BUFFER_DATA (buffer);
format = videomark->format;
width = videomark->width;
height = videomark->height;
width = frame->info.width;
height = frame->info.height;
pw = videomark->pattern_width;
ph = videomark->pattern_height;
row_stride = gst_video_format_get_row_stride (format, 0, width);
pixel_stride = gst_video_format_get_pixel_stride (format, 0);
offset = gst_video_format_get_component_offset (format, 0, width, height);
row_stride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
pixel_stride = GST_VIDEO_FRAME_COMP_PSTRIDE (frame, 0);
req_width =
(videomark->pattern_count + videomark->pattern_data_count) * pw +
@ -173,7 +362,7 @@ gst_video_mark_yuv (GstVideoMark * videomark, GstBuffer * buffer)
/* draw the bottom left pixels */
for (i = 0; i < videomark->pattern_count; i++) {
d = data + offset;
d = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
/* move to start of bottom left */
d += row_stride * (height - ph - videomark->bottom_offset) +
pixel_stride * videomark->left_offset;
@ -195,7 +384,7 @@ gst_video_mark_yuv (GstVideoMark * videomark, GstBuffer * buffer)
/* get the data of the pattern */
for (i = 0; i < videomark->pattern_data_count; i++) {
d = data + offset;
d = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
/* move to start of bottom left, adjust for offsets */
d += row_stride * (height - ph - videomark->bottom_offset) +
pixel_stride * videomark->left_offset;
@ -218,214 +407,17 @@ gst_video_mark_yuv (GstVideoMark * videomark, GstBuffer * buffer)
return GST_FLOW_OK;
}
static GstFlowReturn
gst_video_mark_transform_ip (GstBaseTransform * trans, GstBuffer * buf)
{
GstVideoMark *videomark;
GstFlowReturn ret = GST_FLOW_OK;
videomark = GST_VIDEO_MARK (trans);
static GstFlowReturn
gst_video_mark_transform_frame_ip (GstVideoFilter * filter,
GstVideoFrame * frame)
{
GstVideoMark *videomark = GST_VIDEO_MARK (filter);
GST_DEBUG_OBJECT (videomark, "transform_frame_ip");
if (videomark->enabled)
return gst_video_mark_yuv (videomark, buf);
return gst_video_mark_yuv (videomark, frame);
return ret;
}
/* Legacy (0.10) property setter. Note the dual data properties: the uint64
 * "pattern-data-uint64" and the int "pattern-data" both write the same
 * pattern_data field. */
static void
gst_video_mark_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec)
{
  GstVideoMark *self = GST_VIDEO_MARK (object);

  switch (prop_id) {
    case PROP_PATTERN_WIDTH:
      self->pattern_width = g_value_get_int (value);
      break;
    case PROP_PATTERN_HEIGHT:
      self->pattern_height = g_value_get_int (value);
      break;
    case PROP_PATTERN_COUNT:
      self->pattern_count = g_value_get_int (value);
      break;
    case PROP_PATTERN_DATA_COUNT:
      self->pattern_data_count = g_value_get_int (value);
      break;
    case PROP_PATTERN_DATA_64:
      self->pattern_data = g_value_get_uint64 (value);
      break;
    case PROP_PATTERN_DATA:
      /* int-typed alias of the same field */
      self->pattern_data = g_value_get_int (value);
      break;
    case PROP_ENABLED:
      self->enabled = g_value_get_boolean (value);
      break;
    case PROP_LEFT_OFFSET:
      self->left_offset = g_value_get_int (value);
      break;
    case PROP_BOTTOM_OFFSET:
      self->bottom_offset = g_value_get_int (value);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}
/* Legacy (0.10) property getter. The int-typed "pattern-data" view is
 * clamped to G_MAXINT because the underlying field is 64-bit wide. */
static void
gst_video_mark_get_property (GObject * object, guint prop_id, GValue * value,
    GParamSpec * pspec)
{
  GstVideoMark *self = GST_VIDEO_MARK (object);

  switch (prop_id) {
    case PROP_PATTERN_WIDTH:
      g_value_set_int (value, self->pattern_width);
      break;
    case PROP_PATTERN_HEIGHT:
      g_value_set_int (value, self->pattern_height);
      break;
    case PROP_PATTERN_COUNT:
      g_value_set_int (value, self->pattern_count);
      break;
    case PROP_PATTERN_DATA_COUNT:
      g_value_set_int (value, self->pattern_data_count);
      break;
    case PROP_PATTERN_DATA_64:
      g_value_set_uint64 (value, self->pattern_data);
      break;
    case PROP_PATTERN_DATA:
      /* clamp the 64-bit field into the int-typed property's range */
      g_value_set_int (value, MIN (self->pattern_data, G_MAXINT));
      break;
    case PROP_ENABLED:
      g_value_set_boolean (value, self->enabled);
      break;
    case PROP_LEFT_OFFSET:
      g_value_set_int (value, self->left_offset);
      break;
    case PROP_BOTTOM_OFFSET:
      g_value_set_int (value, self->bottom_offset);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}
/* 0.10-style base_init: attaches the element metadata and the sink/src pad
 * templates to the element class. */
static void
gst_video_mark_base_init (gpointer g_class)
{
  GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);

  /* long name, classification, description, author — shown by gst-inspect */
  gst_element_class_set_static_metadata (element_class, "Video marker",
      "Filter/Effect/Video",
      "Marks a video signal with a pattern", "Wim Taymans <wim@fluendo.com>");

  gst_element_class_add_pad_template (element_class,
      gst_static_pad_template_get (&gst_video_mark_sink_template));
  gst_element_class_add_pad_template (element_class,
      gst_static_pad_template_get (&gst_video_mark_src_template));
}
/* 0.10-style class_init: wires up the property accessors, installs all
 * element properties and hooks the GstBaseTransform vfuncs.
 * Property names, blurbs, ranges and defaults form the element's public API
 * and must not change. */
static void
gst_video_mark_class_init (gpointer klass, gpointer class_data)
{
  GObjectClass *gobject_class;
  GstBaseTransformClass *trans_class;

  gobject_class = (GObjectClass *) klass;
  trans_class = (GstBaseTransformClass *) klass;

  /* needed for chaining up in dispose/finalize with the 0.10 boilerplate */
  parent_class = g_type_class_peek_parent (klass);

  gobject_class->set_property = gst_video_mark_set_property;
  gobject_class->get_property = gst_video_mark_get_property;

  /* geometry of a single pattern marker, in pixels */
  g_object_class_install_property (gobject_class, PROP_PATTERN_WIDTH,
      g_param_spec_int ("pattern-width", "Pattern width",
          "The width of the pattern markers", 1, G_MAXINT,
          DEFAULT_PATTERN_WIDTH,
          G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_PATTERN_HEIGHT,
      g_param_spec_int ("pattern-height", "Pattern height",
          "The height of the pattern markers", 1, G_MAXINT,
          DEFAULT_PATTERN_HEIGHT,
          G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
  /* number of sync markers and of data markers drawn after them */
  g_object_class_install_property (gobject_class, PROP_PATTERN_COUNT,
      g_param_spec_int ("pattern-count", "Pattern count",
          "The number of pattern markers", 0, G_MAXINT,
          DEFAULT_PATTERN_COUNT,
          G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_PATTERN_DATA_COUNT,
      g_param_spec_int ("pattern-data-count", "Pattern data count",
          "The number of extra data pattern markers", 0, 64,
          DEFAULT_PATTERN_DATA_COUNT,
          G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
  /* two views of the same pattern_data field: a full 64-bit property and a
   * narrower int-typed one */
  g_object_class_install_property (gobject_class, PROP_PATTERN_DATA_64,
      g_param_spec_uint64 ("pattern-data-uint64", "Pattern data",
          "The extra data pattern markers", 0, G_MAXUINT64,
          DEFAULT_PATTERN_DATA,
          G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_PATTERN_DATA,
      g_param_spec_int ("pattern-data", "Pattern data",
          "The extra data pattern markers", 0, G_MAXINT,
          DEFAULT_PATTERN_DATA, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_ENABLED,
      g_param_spec_boolean ("enabled", "Enabled",
          "Enable or disable the filter",
          DEFAULT_ENABLED,
          G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
  /* where in the frame the marker row starts */
  g_object_class_install_property (gobject_class, PROP_LEFT_OFFSET,
      g_param_spec_int ("left-offset", "Left Offset",
          "The offset from the left border where the pattern starts", 0,
          G_MAXINT, DEFAULT_LEFT_OFFSET,
          G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_BOTTOM_OFFSET,
      g_param_spec_int ("bottom-offset", "Bottom Offset",
          "The offset from the bottom border where the pattern starts", 0,
          G_MAXINT, DEFAULT_BOTTOM_OFFSET,
          G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));

  trans_class->set_caps = GST_DEBUG_FUNCPTR (gst_video_mark_set_caps);
  trans_class->transform_ip = GST_DEBUG_FUNCPTR (gst_video_mark_transform_ip);

  GST_DEBUG_CATEGORY_INIT (video_mark_debug, "videomark", 0, "Video mark");
}
/* 0.10-style instance init: fields are zeroed by GObject; only log the
 * creation of the instance. */
static void
gst_video_mark_init (GTypeInstance * instance, gpointer g_class)
{
  GstVideoMark *self = GST_VIDEO_MARK (instance);

  GST_DEBUG_OBJECT (self, "gst_video_mark_init");
}
/* 0.10-style lazy type registration for GstVideoMark.
 * Fix: a stray, unreachable `return GST_FLOW_OK;` (a GstFlowReturn, not a
 * GType) had been left after the real return statement; removed. */
GType
gst_video_mark_get_type (void)
{
  static GType video_mark_type = 0;

  if (!video_mark_type) {
    static const GTypeInfo video_mark_info = {
      sizeof (GstVideoMarkClass),
      gst_video_mark_base_init,
      NULL,
      gst_video_mark_class_init,
      NULL,
      NULL,
      sizeof (GstVideoMark),
      0,
      gst_video_mark_init,
    };

    video_mark_type = g_type_register_static (GST_TYPE_VIDEO_FILTER,
        "GstVideoMark", &video_mark_info, 0);
  }
  return video_mark_type;
}

View file

@ -17,39 +17,28 @@
* Boston, MA 02110-1301, USA.
*/
#ifndef __GST_VIDEO_MARK_H__
#define __GST_VIDEO_MARK_H__
#ifndef _GST_VIDEO_MARK_H_
#define _GST_VIDEO_MARK_H_
#include <gst/video/gstvideofilter.h>
#include <gst/video/video.h>
#include <gst/video/gstvideofilter.h>
G_BEGIN_DECLS
/* Standard GObject cast/check boilerplate for GstVideoMark.
 * Fixes: the span contained two conflicting copies of every macro
 * (redefinitions), and GST_IS_VIDEO_MARK_CLASS named its parameter `obj`
 * while expanding `klass`, which breaks at any use site. */
#define GST_TYPE_VIDEO_MARK \
  (gst_video_mark_get_type())
#define GST_VIDEO_MARK(obj) \
  (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_VIDEO_MARK,GstVideoMark))
#define GST_VIDEO_MARK_CLASS(klass) \
  (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_VIDEO_MARK,GstVideoMarkClass))
#define GST_IS_VIDEO_MARK(obj) \
  (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_VIDEO_MARK))
#define GST_IS_VIDEO_MARK_CLASS(klass) \
  (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_VIDEO_MARK))
typedef struct _GstVideoMark GstVideoMark;
typedef struct _GstVideoMarkClass GstVideoMarkClass;
/**
* GstVideoMark:
*
* Opaque datastructure.
*/
struct _GstVideoMark {
GstVideoFilter videofilter;
gint width, height;
GstVideoFormat format;
struct _GstVideoMark
{
GstVideoFilter base_videomark;
/* properties */
gint pattern_width;
gint pattern_height;
gint pattern_count;
@ -60,12 +49,13 @@ struct _GstVideoMark {
gint bottom_offset;
};
struct _GstVideoMarkClass {
GstVideoFilterClass parent_class;
struct _GstVideoMarkClass
{
GstVideoFilterClass base_videomark_class;
};
GType gst_video_mark_get_type (void);
G_END_DECLS
#endif /* __GST_VIDEO_MARK_H__ */
#endif

View file

@ -33,7 +33,6 @@ plugin_init (GstPlugin * plugin)
res = gst_element_register (plugin, "videoanalyse", GST_RANK_NONE,
GST_TYPE_VIDEO_ANALYSE);
#if 0
/* FIXME under no circumstances is anyone allowed to revive the
* element formerly known as videodetect without changing the name
* first. XOXO --ds */
@ -43,7 +42,6 @@ plugin_init (GstPlugin * plugin)
res &= gst_element_register (plugin, "videomark", GST_RANK_NONE,
GST_TYPE_VIDEO_MARK);
#endif
return res;
}