opencv: port to c++

Nicola Murino 2018-12-01 22:48:53 +01:00 committed by Nicolas Dufresne
parent f974246647
commit 26cabf9b1f
45 changed files with 853 additions and 1308 deletions
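
The port follows one pattern throughout: OpenCV's deprecated C API (IplImage, cvCreateImage/cvReleaseImage, the CV_* constants and cv*() calls) is replaced by the C++ API, the only one OpenCV 4 still ships by default. A minimal sketch of the recurring before/after idiom (not taken from the commit; the frame size is arbitrary):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

int main ()
{
  /* removed C API, gone by default in OpenCV 4:
   *   IplImage *gray = cvCreateImage (cvSize (640, 480), IPL_DEPTH_8U, 1);
   *   cvCvtColor (rgb, gray, CV_RGB2GRAY);
   *   cvReleaseImage (&gray);
   */
  cv::Mat rgb (cv::Size (640, 480), CV_8UC3, cv::Scalar::all (0));
  cv::Mat gray;
  cv::cvtColor (rgb, gray, cv::COLOR_RGB2GRAY);
  return 0;   // both Mats free their buffers automatically
}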

View file

@ -1,7 +1,7 @@
/*
* GStreamer
* Copyright (C) 2011 Robert Jobbagy <jobbagy.robert@gmail.com>
* Copyright (C) 2011 Nicola Murino <nicola.murino@gmail.com>
* Copyright (C) 2011 - 2018 Nicola Murino <nicola.murino@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -49,9 +49,6 @@
#include <errno.h>
#include "MotionCells.h"
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/imgproc/imgproc_c.h>
#endif
MotionCells::MotionCells ()
{
@ -60,11 +57,6 @@ MotionCells::MotionCells ()
m_motioncellsidxcstr = NULL;
m_saveInDatafile = false;
mc_savefile = NULL;
m_pcurFrame = NULL;
m_pprevFrame = NULL;
transparencyimg = NULL;
m_pdifferenceImage = NULL;
m_pbwImage = NULL;
m_initdatafilefailed = new char[BUSMSGLEN];
m_savedatafilefailed = new char[BUSMSGLEN];
m_initerrorcode = 0;
@ -101,20 +93,16 @@ MotionCells::~MotionCells ()
delete[]m_savedatafilefailed;
if (m_motioncellsidxcstr)
delete[]m_motioncellsidxcstr;
if (m_pcurFrame)
cvReleaseImage (&m_pcurFrame);
if (m_pprevFrame)
cvReleaseImage (&m_pprevFrame);
if (transparencyimg)
cvReleaseImage (&transparencyimg);
if (m_pdifferenceImage)
cvReleaseImage (&m_pdifferenceImage);
if (m_pbwImage)
cvReleaseImage (&m_pbwImage);
m_pcurFrame.release ();
m_pprevFrame.release ();
transparencyimg.release ();
m_pdifferenceImage.release ();
m_pbwImage.release ();
}
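
cv::Mat is reference counted, so the explicit release () calls above are mostly documentation: each buffer is freed when the last Mat referring to it goes away, which is why the NULL checks and cvReleaseImage pairs can simply disappear. A small sketch of those ownership semantics (not from the commit; sizes arbitrary):

#include <opencv2/core.hpp>
#include <cassert>

int main ()
{
  cv::Mat a (100, 100, CV_8UC1, cv::Scalar (7));
  cv::Mat b = a;                    // shallow copy, shares the buffer
  cv::Mat c = a.clone ();           // deep copy, independent buffer
  b.at<unsigned char> (0, 0) = 42;
  assert (a.at<unsigned char> (0, 0) == 42);  // a sees b's write
  assert (c.at<unsigned char> (0, 0) == 7);   // c does not
  a.release ();                     // drops a's reference only
  assert (a.empty () && !b.empty ());
  return 0;                         // remaining Mats free the buffers
}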
int
MotionCells::performDetectionMotionCells (IplImage * p_frame,
MotionCells::performDetectionMotionCells (cv::Mat p_frame,
double p_sensitivity, double p_framerate, int p_gridx, int p_gridy,
gint64 timestamp_millisec, bool p_isVisible, bool p_useAlpha,
int motionmaskcoord_count, motionmaskcoordrect * motionmaskcoords,
@ -126,7 +114,7 @@ MotionCells::performDetectionMotionCells (IplImage * p_frame,
int sumframecnt = 0;
int ret = 0;
CvSize frameSize;
cv::Size frameSize;
p_framerate >= 1 ? p_framerate <= 5 ? sumframecnt = 1
: p_framerate <= 10 ? sumframecnt = 2
@ -147,38 +135,36 @@ MotionCells::performDetectionMotionCells (IplImage * p_frame,
return ret;
}
frameSize = cvGetSize (p_frame);
frameSize = p_frame.size ();
frameSize.width /= 2;
frameSize.height /= 2;
setMotionCells (frameSize.width, frameSize.height);
m_sensitivity = 1 - p_sensitivity;
m_isVisible = p_isVisible;
m_pcurFrame = cvCloneImage (p_frame);
IplImage *m_pcurgreyImage = cvCreateImage (frameSize, IPL_DEPTH_8U, 1);
IplImage *m_pprevgreyImage = cvCreateImage (frameSize, IPL_DEPTH_8U, 1);
IplImage *m_pgreyImage = cvCreateImage (frameSize, IPL_DEPTH_8U, 1);
IplImage *m_pcurDown =
cvCreateImage (frameSize, m_pcurFrame->depth, m_pcurFrame->nChannels);
IplImage *m_pprevDown = cvCreateImage (frameSize, m_pprevFrame->depth,
m_pprevFrame->nChannels);
m_pbwImage = cvCreateImage (frameSize, IPL_DEPTH_8U, 1);
cvPyrDown (m_pprevFrame, m_pprevDown);
cvCvtColor (m_pprevDown, m_pprevgreyImage, CV_RGB2GRAY);
cvPyrDown (m_pcurFrame, m_pcurDown);
cvCvtColor (m_pcurDown, m_pcurgreyImage, CV_RGB2GRAY);
m_pdifferenceImage = cvCloneImage (m_pcurgreyImage);
m_pcurFrame = p_frame.clone ();
cv::Mat m_pcurgreyImage = cv::Mat (frameSize, CV_8UC1);
cv::Mat m_pprevgreyImage = cv::Mat (frameSize, CV_8UC1);
cv::Mat m_pgreyImage = cv::Mat (frameSize, CV_8UC1);
cv::Mat m_pcurDown = cv::Mat (frameSize, m_pcurFrame.type ());
cv::Mat m_pprevDown = cv::Mat (frameSize, m_pprevFrame.type ());
m_pbwImage.create (frameSize, CV_8UC1);
pyrDown (m_pprevFrame, m_pprevDown);
cvtColor (m_pprevDown, m_pprevgreyImage, cv::COLOR_RGB2GRAY);
pyrDown (m_pcurFrame, m_pcurDown);
cvtColor (m_pcurDown, m_pcurgreyImage, cv::COLOR_RGB2GRAY);
m_pdifferenceImage = m_pcurgreyImage.clone ();
//cvSmooth(m_pcurgreyImage, m_pcurgreyImage, CV_GAUSSIAN, 3, 0);//TODO camera noise reduce,something smoothing, and rethink runningavg weights
//Minus the current gray frame from the 8U moving average.
cvAbsDiff (m_pprevgreyImage, m_pcurgreyImage, m_pdifferenceImage);
cv::absdiff (m_pprevgreyImage, m_pcurgreyImage, m_pdifferenceImage);
//Convert the image to black and white.
cvAdaptiveThreshold (m_pdifferenceImage, m_pbwImage, 255,
CV_ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY_INV, 7);
cv::adaptiveThreshold (m_pdifferenceImage, m_pbwImage, 255,
cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY_INV, 7, 5);
// Dilate and erode to get object blobs
cvDilate (m_pbwImage, m_pbwImage, NULL, 2);
cvErode (m_pbwImage, m_pbwImage, NULL, 2);
cv::dilate (m_pbwImage, m_pbwImage, cv::Mat (), cv::Point (-1, -1), 2);
cv::erode (m_pbwImage, m_pbwImage, cv::Mat (), cv::Point (-1, -1), 2);
//mask-out the overlay on difference image
if (motionmaskcoord_count > 0)
@ -188,15 +174,12 @@ MotionCells::performDetectionMotionCells (IplImage * p_frame,
if (getIsNonZero (m_pbwImage)) { //detect Motion
if (m_MotionCells.size () > 0) //it contains previous motioncells what we used when frames dropped
m_MotionCells.clear ();
if (transparencyimg)
cvReleaseImage (&transparencyimg);
(motioncells_count > 0) ?
calculateMotionPercentInMotionCells (motioncellsidx,
motioncells_count)
: calculateMotionPercentInMotionCells (motionmaskcellsidx, 0);
transparencyimg = cvCreateImage (cvGetSize (p_frame), p_frame->depth, 3);
cvSetZero (transparencyimg);
transparencyimg = cv::Mat::zeros (p_frame.size (), p_frame.type ());
if (m_motioncellsidxcstr)
delete[]m_motioncellsidxcstr;
m_motioncells_idx_count = m_MotionCells.size () * MSGLEN; //one motion cell idx: (lin idx : col idx,) it's up to 6 character except last motion cell idx
@ -204,22 +187,20 @@ MotionCells::performDetectionMotionCells (IplImage * p_frame,
char *tmpstr = new char[MSGLEN + 1];
tmpstr[0] = 0;
for (unsigned int i = 0; i < m_MotionCells.size (); i++) {
CvPoint pt1, pt2;
cv::Point pt1, pt2;
pt1.x = m_MotionCells.at (i).cell_pt1.x * 2;
pt1.y = m_MotionCells.at (i).cell_pt1.y * 2;
pt2.x = m_MotionCells.at (i).cell_pt2.x * 2;
pt2.y = m_MotionCells.at (i).cell_pt2.y * 2;
if (m_useAlpha && m_isVisible) {
cvRectangle (transparencyimg,
pt1,
pt2,
cv::rectangle (transparencyimg,
pt1, pt2,
CV_RGB (motioncellscolor.B_channel_value,
motioncellscolor.G_channel_value,
motioncellscolor.R_channel_value), CV_FILLED);
motioncellscolor.R_channel_value), cv::FILLED);
} else if (m_isVisible) {
cvRectangle (p_frame,
pt1,
pt2,
cv::rectangle (p_frame,
pt1, pt2,
CV_RGB (motioncellscolor.B_channel_value,
motioncellscolor.G_channel_value,
motioncellscolor.R_channel_value), p_thickness);
@ -256,30 +237,10 @@ MotionCells::performDetectionMotionCells (IplImage * p_frame,
m_motioncells_idx_count = 0;
if (m_MotionCells.size () > 0)
m_MotionCells.clear ();
if (transparencyimg)
cvReleaseImage (&transparencyimg);
}
if (m_pprevFrame)
cvReleaseImage (&m_pprevFrame);
m_pprevFrame = cvCloneImage (m_pcurFrame);
m_pprevFrame = m_pcurFrame.clone ();
m_framecnt = 0;
if (m_pcurFrame)
cvReleaseImage (&m_pcurFrame);
if (m_pdifferenceImage)
cvReleaseImage (&m_pdifferenceImage);
if (m_pcurgreyImage)
cvReleaseImage (&m_pcurgreyImage);
if (m_pprevgreyImage)
cvReleaseImage (&m_pprevgreyImage);
if (m_pgreyImage)
cvReleaseImage (&m_pgreyImage);
if (m_pbwImage)
cvReleaseImage (&m_pbwImage);
if (m_pprevDown)
cvReleaseImage (&m_pprevDown);
if (m_pcurDown)
cvReleaseImage (&m_pcurDown);
if (m_pCells) {
for (int i = 0; i < m_gridy; ++i) {
delete[]m_pCells[i];
@ -290,27 +251,25 @@ MotionCells::performDetectionMotionCells (IplImage * p_frame,
if (p_framerate <= 5) {
if (m_MotionCells.size () > 0)
m_MotionCells.clear ();
if (transparencyimg)
cvReleaseImage (&transparencyimg);
}
} else { //we do frame drop
m_motioncells_idx_count = 0;
ret = -2;
for (unsigned int i = 0; i < m_MotionCells.size (); i++) {
CvPoint pt1, pt2;
cv::Point pt1, pt2;
pt1.x = m_MotionCells.at (i).cell_pt1.x * 2;
pt1.y = m_MotionCells.at (i).cell_pt1.y * 2;
pt2.x = m_MotionCells.at (i).cell_pt2.x * 2;
pt2.y = m_MotionCells.at (i).cell_pt2.y * 2;
if (m_useAlpha && m_isVisible) {
cvRectangle (transparencyimg,
cv::rectangle (transparencyimg,
pt1,
pt2,
CV_RGB (motioncellscolor.B_channel_value,
motioncellscolor.G_channel_value,
motioncellscolor.R_channel_value), CV_FILLED);
motioncellscolor.R_channel_value), cv::FILLED);
} else if (m_isVisible) {
cvRectangle (p_frame,
cv::rectangle (p_frame,
pt1,
pt2,
CV_RGB (motioncellscolor.B_channel_value,
@ -447,8 +406,7 @@ MotionCells::calculateMotionPercentInCell (int p_row, int p_col,
for (int i = ybegin; i < yend; i++) {
for (int j = xbegin; j < xend; j++) {
cntpixelsnum++;
if ((((uchar *) (m_pbwImage->imageData + m_pbwImage->widthStep * i))[j]) >
0) {
if ((((uchar *) (m_pbwImage.data + m_pbwImage.step[0] * i))[j]) > 0) {
cntmotionpixelnum++;
if (cntmotionpixelnum >= thresholdmotionpixelnum) { //we dont needs calculate anymore
*p_motionarea = cntmotionpixelnum;
@ -487,7 +445,7 @@ MotionCells::calculateMotionPercentInMotionCells (motioncellidx *
mci.cell_pt2.y = floor ((double) (i + 1) * m_cellheight);
int w = mci.cell_pt2.x - mci.cell_pt1.x;
int h = mci.cell_pt2.y - mci.cell_pt1.y;
mci.motioncell = cvRect (mci.cell_pt1.x, mci.cell_pt1.y, w, h);
mci.motioncell = cv::Rect (mci.cell_pt1.x, mci.cell_pt1.y, w, h);
m_MotionCells.push_back (mci);
}
}
@ -512,7 +470,7 @@ MotionCells::calculateMotionPercentInMotionCells (motioncellidx *
mci.cell_pt2.y = floor ((double) (i + 1) * m_cellheight);
int w = mci.cell_pt2.x - mci.cell_pt1.x;
int h = mci.cell_pt2.y - mci.cell_pt1.y;
mci.motioncell = cvRect (mci.cell_pt1.x, mci.cell_pt1.y, w, h);
mci.motioncell = cv::Rect (mci.cell_pt1.x, mci.cell_pt1.y, w, h);
m_MotionCells.push_back (mci);
}
}
@ -523,10 +481,10 @@ void
MotionCells::performMotionMaskCoords (motionmaskcoordrect * p_motionmaskcoords,
int p_motionmaskcoords_count)
{
CvPoint upperleft;
cv::Point upperleft;
upperleft.x = 0;
upperleft.y = 0;
CvPoint lowerright;
cv::Point lowerright;
lowerright.x = 0;
lowerright.y = 0;
for (int i = 0; i < p_motionmaskcoords_count; i++) {
@ -534,8 +492,8 @@ MotionCells::performMotionMaskCoords (motionmaskcoordrect * p_motionmaskcoords,
upperleft.y = p_motionmaskcoords[i].upper_left_y;
lowerright.x = p_motionmaskcoords[i].lower_right_x;
lowerright.y = p_motionmaskcoords[i].lower_right_y;
cvRectangle (m_pbwImage, upperleft, lowerright, CV_RGB (0, 0, 0),
CV_FILLED);
cv::rectangle (m_pbwImage, upperleft, lowerright, CV_RGB (0, 0, 0),
cv::FILLED);
}
}
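
cv::rectangle with cv::FILLED replaces cvRectangle/CV_FILLED here. A sketch of blacking out a mask region the way performMotionMaskCoords does (not from the commit; mask size and coordinates assumed):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

int main ()
{
  cv::Mat mask (240, 320, CV_8UC1, cv::Scalar (255));
  // Zero a rectangular region; cv::FILLED (== -1) means fill, not outline.
  cv::rectangle (mask, cv::Point (10, 10), cv::Point (100, 80),
      cv::Scalar (0), cv::FILLED);
  return 0;
}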
@ -552,7 +510,7 @@ MotionCells::performMotionMask (motioncellidx * p_motionmaskcellsidx,
(double) p_motionmaskcellsidx[k].lineidx * m_cellheight + m_cellheight;
for (int i = beginy; i < endy; i++)
for (int j = beginx; j < endx; j++) {
((uchar *) (m_pbwImage->imageData + m_pbwImage->widthStep * i))[j] = 0;
((uchar *) (m_pbwImage.data + m_pbwImage.step[0] * i))[j] = 0;
}
}
}
@ -560,17 +518,17 @@ MotionCells::performMotionMask (motioncellidx * p_motionmaskcellsidx,
///BGR if we use only OpenCV
//RGB if we use gst+OpenCV
void
MotionCells::blendImages (IplImage * p_actFrame, IplImage * p_cellsFrame,
MotionCells::blendImages (cv::Mat p_actFrame, cv::Mat p_cellsFrame,
float p_alpha, float p_beta)
{
int height = p_actFrame->height;
int width = p_actFrame->width;
int step = p_actFrame->widthStep / sizeof (uchar);
int channels = p_actFrame->nChannels;
int cellstep = p_cellsFrame->widthStep / sizeof (uchar);
uchar *curImageData = (uchar *) p_actFrame->imageData;
uchar *cellImageData = (uchar *) p_cellsFrame->imageData;
int height = p_actFrame.size ().height;
int width = p_actFrame.size ().width;
int step = p_actFrame.step[0] / sizeof (uchar);
int channels = p_actFrame.channels ();
int cellstep = p_cellsFrame.step[0] / sizeof (uchar);
uchar *curImageData = (uchar *) p_actFrame.data;
uchar *cellImageData = (uchar *) p_cellsFrame.data;
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++)

View file

@ -1,7 +1,7 @@
/*
* GStreamer
* Copyright (C) 2011 Robert Jobbagy <jobbagy.robert@gmail.com>
* Copyright (C) 2011 Nicola Murino <nicola.murino@gmail.com>
* Copyright (C) 2011 - 2018 Nicola Murino <nicola.murino@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -46,11 +46,6 @@
#define MOTIONCELLS_H_
#include <opencv2/core.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/core/types_c.h>
#include <opencv2/core/core_c.h>
#endif
#include <fstream>
#include <vector>
#include <glib.h>
@ -109,18 +104,18 @@ struct Cell
struct MotionCellsIdx
{
CvRect motioncell;
cv::Rect motioncell;
//Points for the edges of the rectangle.
CvPoint cell_pt1;
CvPoint cell_pt2;
cv::Point cell_pt1;
cv::Point cell_pt2;
int lineidx;
int colidx;
};
struct OverlayRegions
{
CvPoint upperleft;
CvPoint lowerright;
cv::Point upperleft;
cv::Point lowerright;
};
class MotionCells
@ -130,7 +125,7 @@ public:
MotionCells ();
virtual ~ MotionCells ();
int performDetectionMotionCells (IplImage * p_frame, double p_sensitivity,
int performDetectionMotionCells (cv::Mat p_frame, double p_sensitivity,
double p_framerate, int p_gridx, int p_gridy, gint64 timestamp_millisec,
bool p_isVisble, bool p_useAlpha, int motionmaskcoord_count,
motionmaskcoordrect * motionmaskcoords, int motionmaskcells_count,
@ -138,9 +133,9 @@ public:
int motioncells_count, motioncellidx * motioncellsidx, gint64 starttime,
char *datafile, bool p_changed_datafile, int p_thickness);
void setPrevFrame (IplImage * p_prevframe)
void setPrevFrame (cv::Mat p_prevframe)
{
m_pprevFrame = cvCloneImage (p_prevframe);
m_pprevFrame = p_prevframe.clone();
}
char *getMotionCellsIdx ()
{
@ -198,26 +193,26 @@ private:
p_motionmaskcellsidx, int p_motionmaskcells_count = 0);
int saveMotionCells (gint64 timestamp_millisec);
int initDataFile (char *p_datafile, gint64 starttime);
void blendImages (IplImage * p_actFrame, IplImage * p_cellsFrame,
void blendImages (cv::Mat p_actFrame, cv::Mat p_cellsFrame,
float p_alpha, float p_beta);
void setData (IplImage * img, int lin, int col, uchar valor)
void setData (cv::Mat img, int lin, int col, uchar valor)
{
((uchar *) (img->imageData + img->widthStep * lin))[col] = valor;
((uchar *) (img.data + img.step[0] * lin))[col] = valor;
}
uchar getData (IplImage * img, int lin, int col)
uchar getData (cv::Mat img, int lin, int col)
{
return ((uchar *) (img->imageData + img->widthStep * lin))[col];
return ((uchar *) (img.data + img.step[0] * lin))[col];
}
bool getIsNonZero (IplImage * img)
bool getIsNonZero (cv::Mat img)
{
int lin, col;
for (lin = 0; lin < img->height; lin++)
for (col = 0; col < img->width; col++) {
if ((((uchar *) (img->imageData + img->widthStep * lin))[col]) > 0)
for (lin = 0; lin < img.size().height; lin++)
for (col = 0; col < img.size().width; col++) {
if ((((uchar *) (img.data + img.step[0] * lin))[col]) > 0)
return true;
}
return false;
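
For a single-channel image this scan is equivalent to testing cv::countNonZero (img) > 0. The port keeps the explicit loop; the shorter form would be (a sketch, assuming one channel):

#include <opencv2/core.hpp>
#include <cassert>

static bool isNonZero (const cv::Mat & img)   // single-channel only
{
  return cv::countNonZero (img) > 0;
}

int main ()
{
  cv::Mat m = cv::Mat::zeros (4, 4, CV_8UC1);
  assert (!isNonZero (m));
  m.at<unsigned char> (2, 3) = 1;
  assert (isNonZero (m));
  return 0;
}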
@ -243,8 +238,8 @@ private:
}
}
IplImage *m_pcurFrame, *m_pprevFrame, *m_pdifferenceImage,
*m_pbwImage,*transparencyimg;
cv::Mat m_pprevFrame, m_pdifferenceImage, m_pbwImage, m_pcurFrame,
transparencyimg;
bool m_isVisible, m_changed_datafile, m_useAlpha, m_saveInDatafile;
Cell **m_pCells;
vector < MotionCellsIdx > m_MotionCells;

View file

@ -39,7 +39,7 @@ gst_camera_event_new_calibrated (gchar * settings)
GstStructure *s;
s = gst_structure_new (GST_CAMERA_EVENT_CALIBRATED_NAME,
"undistort-settings", G_TYPE_STRING, g_strdup(settings), NULL);
"undistort-settings", G_TYPE_STRING, g_strdup (settings), NULL);
calibrated_event = gst_event_new_custom (GST_EVENT_CUSTOM_BOTH, s);
@ -66,16 +66,16 @@ gst_camera_event_parse_calibrated (GstEvent * event, gchar ** settings)
g_return_val_if_fail (event != NULL, FALSE);
if (GST_EVENT_TYPE (event) != GST_EVENT_CUSTOM_BOTH)
return FALSE; /* Not a calibrated event */
return FALSE; /* Not a calibrated event */
s = gst_event_get_structure (event);
if (s == NULL
|| !gst_structure_has_name (s, GST_CAMERA_EVENT_CALIBRATED_NAME))
return FALSE; /* Not a calibrated event */
const gchar *str = gst_structure_get_string(s, "undistort-settings");
const gchar *str = gst_structure_get_string (s, "undistort-settings");
if (!str)
return FALSE; /* Not calibrated frame event */
return FALSE; /* Not calibrated frame event */
*settings = g_strdup (str);

View file

@ -85,9 +85,6 @@
#include "gstcameracalibrate.h"
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/imgproc/imgproc_c.h>
#endif
#include <opencv2/calib3d.hpp>
#include <gst/opencv/gstopencvutils.h>
@ -172,7 +169,7 @@ static void gst_camera_calibrate_get_property (GObject * object, guint prop_id,
static GstFlowReturn
gst_camera_calibrate_transform_frame_ip (GstOpencvVideoFilter * cvfilter,
GstBuffer * frame, IplImage * img);
GstBuffer * frame, cv::Mat img);
/* clean up */
static void
@ -455,14 +452,14 @@ gst_camera_calibrate_get_property (GObject * object, guint prop_id,
}
}
void camera_calibrate_run (GstCameraCalibrate * calib, IplImage * img);
void camera_calibrate_run (GstCameraCalibrate * calib, cv::Mat img);
/*
* Performs the camera calibration
*/
static GstFlowReturn
gst_camera_calibrate_transform_frame_ip (GstOpencvVideoFilter * cvfilter,
G_GNUC_UNUSED GstBuffer * frame, IplImage * img)
G_GNUC_UNUSED GstBuffer * frame, cv::Mat img)
{
GstCameraCalibrate *calib = GST_CAMERA_CALIBRATE (cvfilter);
@ -476,14 +473,13 @@ bool camera_calibrate_calibrate (GstCameraCalibrate * calib,
std::vector < std::vector < cv::Point2f > >imagePoints);
void
camera_calibrate_run (GstCameraCalibrate * calib, IplImage * img)
camera_calibrate_run (GstCameraCalibrate * calib, cv::Mat img)
{
cv::Mat view = cv::cvarrToMat (img);
// For camera only take new samples after delay time
if (calib->mode == CAPTURING) {
// get_input
cv::Size imageSize = view.size ();
cv::Size imageSize = img.size ();
/* find_pattern
* FIXME find ways to reduce CPU usage
@ -506,15 +502,15 @@ camera_calibrate_run (GstCameraCalibrate * calib, IplImage * img)
switch (calib->calibrationPattern) {
case GST_CAMERA_CALIBRATION_PATTERN_CHESSBOARD:
found =
cv::findChessboardCorners (view, calib->boardSize, pointBuf,
cv::findChessboardCorners (img, calib->boardSize, pointBuf,
chessBoardFlags);
break;
case GST_CAMERA_CALIBRATION_PATTERN_CIRCLES_GRID:
found = cv::findCirclesGrid (view, calib->boardSize, pointBuf);
found = cv::findCirclesGrid (img, calib->boardSize, pointBuf);
break;
case GST_CAMERA_CALIBRATION_PATTERN_ASYMMETRIC_CIRCLES_GRID:
found =
cv::findCirclesGrid (view, calib->boardSize, pointBuf,
cv::findCirclesGrid (img, calib->boardSize, pointBuf,
cv::CALIB_CB_ASYMMETRIC_GRID);
break;
default:
@ -531,7 +527,7 @@ camera_calibrate_run (GstCameraCalibrate * calib, IplImage * img)
* the color convert should be done once (if needed) and shared
* FIXME keep viewGray around to avoid reallocating it each time... */
cv::Mat viewGray;
cv::cvtColor (view, viewGray, cv::COLOR_BGR2GRAY);
cv::cvtColor (img, viewGray, cv::COLOR_BGR2GRAY);
cv::cornerSubPix (viewGray, pointBuf, cv::Size (11, 11), cv::Size (-1,
-1),
cv::TermCriteria (cv::TermCriteria::EPS + cv::TermCriteria::COUNT,
@ -549,7 +545,7 @@ camera_calibrate_run (GstCameraCalibrate * calib, IplImage * img)
/* draw the corners */
if (calib->showCorners) {
cv::drawChessboardCorners (view, calib->boardSize, cv::Mat (pointBuf),
cv::drawChessboardCorners (img, calib->boardSize, cv::Mat (pointBuf),
found);
}
}
@ -598,7 +594,7 @@ camera_calibrate_run (GstCameraCalibrate * calib, IplImage * img)
}
if (calib->mode == CAPTURING && blinkOutput) {
bitwise_not (view, view);
bitwise_not (img, img);
}
}
@ -613,8 +609,8 @@ camera_calibrate_run (GstCameraCalibrate * calib, IplImage * img)
(calib->mode == CALIBRATED) ? "Calibrated" : "Waiting...";
int baseLine = 0;
cv::Size textSize = cv::getTextSize (msg, 1, 1, 1, &baseLine);
cv::Point textOrigin (view.cols - 2 * textSize.width - 10,
view.rows - 2 * baseLine - 10);
cv::Point textOrigin (img.cols - 2 * textSize.width - 10,
img.rows - 2 * baseLine - 10);
if (calib->mode == CAPTURING) {
msg =
@ -624,7 +620,7 @@ camera_calibrate_run (GstCameraCalibrate * calib, IplImage * img)
const cv::Scalar RED (0, 0, 255);
const cv::Scalar GREEN (0, 255, 0);
cv::putText (view, msg, textOrigin, 1, 1,
cv::putText (img, msg, textOrigin, 1, 1,
calib->mode == CALIBRATED ? GREEN : RED);
}
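
The capture path now works on the passed-in cv::Mat end to end: detect corners on the color frame, refine them to sub-pixel accuracy on a gray copy, then draw. A condensed sketch of that sequence (board size, flags and termination criteria below are illustrative, not the element's defaults):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <vector>

bool find_board (cv::Mat & img, std::vector<cv::Point2f> & corners)
{
  const cv::Size boardSize (9, 6);              // inner corners, assumed
  bool found = cv::findChessboardCorners (img, boardSize, corners,
      cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_NORMALIZE_IMAGE);
  if (found) {
    cv::Mat gray;
    cv::cvtColor (img, gray, cv::COLOR_BGR2GRAY);
    cv::cornerSubPix (gray, corners, cv::Size (11, 11), cv::Size (-1, -1),
        cv::TermCriteria (cv::TermCriteria::EPS + cv::TermCriteria::COUNT,
            30, 0.1));
    cv::drawChessboardCorners (img, boardSize, cv::Mat (corners), found);
  }
  return found;
}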

View file

@ -61,7 +61,7 @@
* gst-launch-1.0 -v v4l2src ! videoconvert ! cameraundistort settings="???" ! autovideosink
* ]| will correct camera distortion based on provided settings.
* |[
* gst-launch-1.0 -v v4l2src ! videoconvert ! cameraundistort ! cameracalibrate | autovideosink
* gst-launch-1.0 -v v4l2src ! videoconvert ! cameraundistort ! cameracalibrate ! autovideosink
* ]| will correct camera distortion once camera calibration is done.
* </refsect2>
*/
@ -78,9 +78,6 @@
#include "gstcameraundistort.h"
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/imgproc/imgproc_c.h>
#endif
#include <opencv2/calib3d.hpp>
#include <gst/opencv/gstopencvutils.h>
@ -111,19 +108,19 @@ static void gst_camera_undistort_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static gboolean gst_camera_undistort_set_info (GstOpencvVideoFilter * cvfilter,
gint in_width, gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels);
gint in_width, gint in_height, int in_cv_type,
gint out_width, gint out_height, int out_cv_type);
static GstFlowReturn gst_camera_undistort_transform_frame (GstOpencvVideoFilter
* cvfilter, GstBuffer * frame, IplImage * img, GstBuffer * outframe,
IplImage * outimg);
* cvfilter, GstBuffer * frame, cv::Mat img, GstBuffer * outframe,
cv::Mat outimg);
static gboolean gst_camera_undistort_sink_event (GstBaseTransform * trans,
GstEvent * event);
static gboolean gst_camera_undistort_src_event (GstBaseTransform * trans,
GstEvent * event);
static void camera_undistort_run (GstCameraUndistort * undist, IplImage * img,
IplImage * outimg);
static void camera_undistort_run (GstCameraUndistort * undist, cv::Mat img,
cv::Mat outimg);
static gboolean camera_undistort_init_undistort_rectify_map (GstCameraUndistort
* undist);
@ -279,13 +276,11 @@ gst_camera_undistort_get_property (GObject * object, guint prop_id,
gboolean
gst_camera_undistort_set_info (GstOpencvVideoFilter * cvfilter,
gint in_width, gint in_height,
__attribute__((unused)) gint in_depth,
__attribute__((unused)) gint in_channels,
gint in_width, gint in_height, __attribute__((unused))
int in_cv_type,
__attribute__((unused)) gint out_width,
__attribute__((unused)) gint out_height,
__attribute__((unused)) gint out_depth,
__attribute__((unused)) gint out_channels)
__attribute__((unused)) gint out_height, __attribute__((unused))
int out_cv_type)
{
GstCameraUndistort *undist = GST_CAMERA_UNDISTORT (cvfilter);
@ -299,8 +294,8 @@ gst_camera_undistort_set_info (GstOpencvVideoFilter * cvfilter,
*/
static GstFlowReturn
gst_camera_undistort_transform_frame (GstOpencvVideoFilter * cvfilter,
G_GNUC_UNUSED GstBuffer * frame, IplImage * img,
G_GNUC_UNUSED GstBuffer * outframe, IplImage * outimg)
G_GNUC_UNUSED GstBuffer * frame, cv::Mat img,
G_GNUC_UNUSED GstBuffer * outframe, cv::Mat outimg)
{
GstCameraUndistort *undist = GST_CAMERA_UNDISTORT (cvfilter);
@ -310,12 +305,8 @@ gst_camera_undistort_transform_frame (GstOpencvVideoFilter * cvfilter,
}
static void
camera_undistort_run (GstCameraUndistort * undist, IplImage * img,
IplImage * outimg)
camera_undistort_run (GstCameraUndistort * undist, cv::Mat img, cv::Mat outimg)
{
const cv::Mat view = cv::cvarrToMat (img);
cv::Mat outview = cv::cvarrToMat (outimg);
/* TODO is settingsChanged handling thread safe ? */
if (undist->settingsChanged) {
/* settings have changed, need to recompute undistort */
@ -332,16 +323,16 @@ camera_undistort_run (GstCameraUndistort * undist, IplImage * img,
if (undist->showUndistorted && undist->doUndistort) {
/* do the undistort */
cv::remap (view, outview, undist->map1, undist->map2, cv::INTER_LINEAR);
cv::remap (img, outimg, undist->map1, undist->map2, cv::INTER_LINEAR);
if (undist->crop) {
/* TODO do the cropping */
const cv::Scalar CROP_COLOR (0, 255, 0);
cv::rectangle (outview, undist->validPixROI, CROP_COLOR);
cv::rectangle (outimg, undist->validPixROI, CROP_COLOR);
}
} else {
/* FIXME should use pass through to avoid this copy when not undistorting */
view.copyTo (outview);
img.copyTo (outimg);
}
}
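
camera_undistort_run now remaps straight between the Mats the base class wraps, with no cvarrToMat round trip. A sketch of the initUndistortRectifyMap/remap pairing it relies on (the intrinsics below are invented; the element derives its own from the calibration settings):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>

int main ()
{
  cv::Size size (640, 480);
  cv::Mat K = (cv::Mat_<double> (3, 3) <<
      500, 0, 320,
      0, 500, 240,
      0, 0, 1);                                  // assumed camera matrix
  cv::Mat dist = (cv::Mat_<double> (1, 5) << -0.2, 0.05, 0, 0, 0);
  cv::Mat map1, map2;
  cv::initUndistortRectifyMap (K, dist, cv::Mat (), K, size,
      CV_16SC2, map1, map2);                     // compute once
  cv::Mat in (size, CV_8UC3, cv::Scalar::all (128)), out;
  cv::remap (in, out, map1, map2, cv::INTER_LINEAR);  // reuse per frame
  return 0;
}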

View file

@ -59,10 +59,8 @@
#endif
#include "gstcvdilate.h"
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/imgproc/imgproc_c.h>
#endif
GST_DEBUG_CATEGORY_STATIC (gst_cv_dilate_debug);
#define GST_CAT_DEFAULT gst_cv_dilate_debug
@ -70,7 +68,7 @@ GST_DEBUG_CATEGORY_STATIC (gst_cv_dilate_debug);
G_DEFINE_TYPE (GstCvDilate, gst_cv_dilate, GST_TYPE_CV_DILATE_ERODE);
static GstFlowReturn gst_cv_dilate_transform_ip (GstOpencvVideoFilter *
filter, GstBuffer * buf, IplImage * img);
filter, GstBuffer * buf, cv::Mat img);
/* initialize the cvdilate's class */
static void
@ -101,11 +99,11 @@ gst_cv_dilate_init (GstCvDilate * filter)
static GstFlowReturn
gst_cv_dilate_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
IplImage * img)
cv::Mat img)
{
GstCvDilateErode *filter = GST_CV_DILATE_ERODE (base);
cvDilate (img, img, NULL, filter->iterations);
cv::dilate (img, img, cv::Mat (), cv::Point (-1, -1), filter->iterations);
return GST_FLOW_OK;
}
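
Passing cv::Mat () as the structuring element reproduces the old cvDilate (..., NULL, ...) behaviour: a default 3x3 rectangular kernel anchored at its center, here written as cv::Point (-1, -1). A sketch with an arbitrary test image:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

int main ()
{
  cv::Mat img (100, 100, CV_8UC1, cv::Scalar (0));
  img.at<unsigned char> (50, 50) = 255;
  // Empty kernel == default 3x3 rectangle; two iterations grow the
  // single pixel into a 5x5 block.
  cv::dilate (img, img, cv::Mat (), cv::Point (-1, -1), 2);
  return 0;
}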

View file

@ -62,9 +62,6 @@
#include "gstcvequalizehist.h"
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/imgproc/imgproc_c.h>
#endif
GST_DEBUG_CATEGORY_STATIC (gst_cv_equalize_hist_debug);
#define GST_CAT_DEFAULT gst_cv_equalize_hist_debug
@ -83,8 +80,7 @@ G_DEFINE_TYPE (GstCvEqualizeHist, gst_cv_equalize_hist,
GST_TYPE_OPENCV_VIDEO_FILTER);
static GstFlowReturn gst_cv_equalize_hist_transform (GstOpencvVideoFilter *
filter, GstBuffer * buf, IplImage * img, GstBuffer * outbuf,
IplImage * outimg);
filter, GstBuffer * buf, cv::Mat img, GstBuffer * outbuf, cv::Mat outimg);
static void
@ -115,9 +111,9 @@ gst_cv_equalize_hist_init (GstCvEqualizeHist * filter)
static GstFlowReturn
gst_cv_equalize_hist_transform (GstOpencvVideoFilter * base,
GstBuffer * buf, IplImage * img, GstBuffer * outbuf, IplImage * outimg)
GstBuffer * buf, cv::Mat img, GstBuffer * outbuf, cv::Mat outimg)
{
cvEqualizeHist (img, outimg);
cv::equalizeHist (img, outimg);
return GST_FLOW_OK;
}

View file

@ -60,9 +60,7 @@
#include "gstcverode.h"
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/imgproc/imgproc_c.h>
#endif
GST_DEBUG_CATEGORY_STATIC (gst_cv_erode_debug);
#define GST_CAT_DEFAULT gst_cv_erode_debug
@ -70,7 +68,7 @@ GST_DEBUG_CATEGORY_STATIC (gst_cv_erode_debug);
G_DEFINE_TYPE (GstCvErode, gst_cv_erode, GST_TYPE_CV_DILATE_ERODE);
static GstFlowReturn gst_cv_erode_transform_ip (GstOpencvVideoFilter *
filter, GstBuffer * buf, IplImage * img);
filter, GstBuffer * buf, cv::Mat img);
/* initialize the cverode's class */
static void
@ -101,11 +99,11 @@ gst_cv_erode_init (GstCvErode * filter)
static GstFlowReturn
gst_cv_erode_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
IplImage * img)
cv::Mat img)
{
GstCvDilateErode *filter = GST_CV_DILATE_ERODE (base);
cvErode (img, img, NULL, filter->iterations);
cv::erode (img, img, cv::Mat (), cv::Point (-1, -1), filter->iterations);
return GST_FLOW_OK;
}

View file

@ -60,10 +60,6 @@
#include "gstcvlaplace.h"
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>
#endif
GST_DEBUG_CATEGORY_STATIC (gst_cv_laplace_debug);
@ -109,11 +105,11 @@ static void gst_cv_laplace_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static GstFlowReturn gst_cv_laplace_transform (GstOpencvVideoFilter * filter,
GstBuffer * buf, IplImage * img, GstBuffer * outbuf, IplImage * outimg);
GstBuffer * buf, cv::Mat img, GstBuffer * outbuf, cv::Mat outimg);
static gboolean gst_cv_laplace_cv_set_caps (GstOpencvVideoFilter * trans,
gint in_width, gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels);
gint in_width, gint in_height, int in_cv_type,
gint out_width, gint out_height, int out_cv_type);
/* Clean up */
static void
@ -121,11 +117,9 @@ gst_cv_laplace_finalize (GObject * obj)
{
GstCvLaplace *filter = GST_CV_LAPLACE (obj);
if (filter->intermediary_img) {
cvReleaseImage (&filter->intermediary_img);
cvReleaseImage (&filter->cvGray);
cvReleaseImage (&filter->Laplace);
}
filter->intermediary_img.release ();
filter->cvGray.release ();
filter->Laplace.release ();
G_OBJECT_CLASS (gst_cv_laplace_parent_class)->finalize (obj);
}
@ -192,23 +186,14 @@ gst_cv_laplace_init (GstCvLaplace * filter)
static gboolean
gst_cv_laplace_cv_set_caps (GstOpencvVideoFilter * trans, gint in_width,
gint in_height, gint in_depth, gint in_channels, gint out_width,
gint out_height, gint out_depth, gint out_channels)
gint in_height, int in_cv_type, gint out_width,
gint out_height, int out_cv_type)
{
GstCvLaplace *filter = GST_CV_LAPLACE (trans);
if (filter->intermediary_img != NULL) {
cvReleaseImage (&filter->intermediary_img);
cvReleaseImage (&filter->cvGray);
cvReleaseImage (&filter->Laplace);
}
filter->intermediary_img =
cvCreateImage (cvSize (out_width, out_height), IPL_DEPTH_16S, 1);
filter->cvGray =
cvCreateImage (cvSize (in_width, in_height), IPL_DEPTH_8U, 1);
filter->Laplace =
cvCreateImage (cvSize (in_width, in_height), IPL_DEPTH_8U, 1);
filter->intermediary_img.create (cv::Size (out_width, out_height), CV_16SC1);
filter->cvGray.create (cv::Size (in_width, in_height), CV_8UC1);
filter->Laplace.create (cv::Size (in_width, in_height), CV_8UC1);
return TRUE;
}
@ -271,22 +256,21 @@ gst_cv_laplace_get_property (GObject * object, guint prop_id,
static GstFlowReturn
gst_cv_laplace_transform (GstOpencvVideoFilter * base, GstBuffer * buf,
IplImage * img, GstBuffer * outbuf, IplImage * outimg)
cv::Mat img, GstBuffer * outbuf, cv::Mat outimg)
{
GstCvLaplace *filter = GST_CV_LAPLACE (base);
g_assert (filter->intermediary_img);
cv::cvtColor (img, filter->cvGray, cv::COLOR_RGB2GRAY);
cv::Laplacian (filter->cvGray, filter->intermediary_img,
filter->intermediary_img.depth (), filter->aperture_size);
filter->intermediary_img.convertTo (filter->Laplace, filter->Laplace.type (),
filter->scale, filter->shift);
cvCvtColor (img, filter->cvGray, CV_RGB2GRAY);
cvLaplace (filter->cvGray, filter->intermediary_img, filter->aperture_size);
cvConvertScale (filter->intermediary_img, filter->Laplace, filter->scale,
filter->shift);
cvZero (outimg);
outimg.setTo (cv::Scalar::all (0));
if (filter->mask) {
cvCopy (img, outimg, filter->Laplace);
img.copyTo (outimg, filter->Laplace);
} else {
cvCvtColor (filter->Laplace, outimg, CV_GRAY2RGB);
cv::cvtColor (filter->Laplace, outimg, cv::COLOR_GRAY2RGB);
}
return GST_FLOW_OK;

View file

@ -72,9 +72,9 @@ struct _GstCvLaplace
gdouble shift;
gboolean mask;
IplImage *intermediary_img;
IplImage *cvGray;
IplImage *Laplace;
cv::Mat intermediary_img;
cv::Mat cvGray;
cv::Mat Laplace;
};
struct _GstCvLaplaceClass

View file

@ -61,9 +61,6 @@
#include "gst/opencv/gstopencvutils.h"
#include "gstcvsmooth.h"
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/imgproc/imgproc_c.h>
#endif
GST_DEBUG_CATEGORY_STATIC (gst_cv_smooth_debug);
@ -99,6 +96,15 @@ enum
* Keep it deactivated for now.
*/
enum GstCvSmoothMethod
{
GST_SMOOTH_BLUR = 1,
GST_SMOOTH_GAUSSIAN = 2,
GST_SMOOTH_MEDIAN = 3,
GST_SMOOTH_BILATERAL = 4
};
#define GST_TYPE_CV_SMOOTH_TYPE (gst_cv_smooth_type_get_type ())
static GType
gst_cv_smooth_type_get_type (void)
@ -106,10 +112,10 @@ gst_cv_smooth_type_get_type (void)
static GType cv_smooth_type_type = 0;
static const GEnumValue smooth_types[] = {
{CV_BLUR, "CV Blur", "blur"},
{CV_GAUSSIAN, "CV Gaussian", "gaussian"},
{CV_MEDIAN, "CV Median", "median"},
{CV_BILATERAL, "CV Bilateral", "bilateral"},
{GST_SMOOTH_BLUR, "CV Blur", "blur"},
{GST_SMOOTH_GAUSSIAN, "CV Gaussian", "gaussian"},
{GST_SMOOTH_MEDIAN, "CV Median", "median"},
{GST_SMOOTH_BILATERAL, "CV Bilateral", "bilateral"},
{0, NULL, NULL},
};
@ -120,7 +126,7 @@ gst_cv_smooth_type_get_type (void)
return cv_smooth_type_type;
}
#define DEFAULT_CV_SMOOTH_TYPE CV_GAUSSIAN
#define DEFAULT_CV_SMOOTH_TYPE GST_SMOOTH_GAUSSIAN
#define DEFAULT_KERNELWIDTH 3
#define DEFAULT_KERNELHEIGHT 3
#define DEFAULT_COLORSIGMA 0.0
@ -138,7 +144,7 @@ static void gst_cv_smooth_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static GstFlowReturn gst_cv_smooth_transform_ip (GstOpencvVideoFilter *
filter, GstBuffer * buf, IplImage * img);
filter, GstBuffer * buf, Mat img);
/* initialize the cvsmooth's class */
static void
@ -261,8 +267,8 @@ gst_cv_smooth_change_type (GstCvSmooth * filter, gint value)
filter->type = value;
switch (value) {
case CV_GAUSSIAN:
case CV_BLUR:
case GST_SMOOTH_GAUSSIAN:
case GST_SMOOTH_BLUR:
gst_opencv_video_filter_set_in_place (GST_OPENCV_VIDEO_FILTER_CAST
(filter), TRUE);
break;
@ -371,14 +377,13 @@ gst_cv_smooth_get_property (GObject * object, guint prop_id,
static GstFlowReturn
gst_cv_smooth_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
IplImage * img)
Mat img)
{
GstCvSmooth *filter = GST_CV_SMOOTH (base);
Mat mat = cvarrToMat (img);
if (filter->positionx != 0 || filter->positiony != 0 ||
filter->width != G_MAXINT || filter->height != G_MAXINT) {
Size mat_size = mat.size ();
Size mat_size = img.size ();
/* if the effect would start outside the image, just skip it */
if (filter->positionx >= mat_size.width
@ -393,23 +398,23 @@ gst_cv_smooth_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
MIN (filter->width, mat_size.width - filter->positionx),
MIN (filter->height, mat_size.height - filter->positiony));
mat = mat (mat_rect);
img = img (mat_rect);
}
switch (filter->type) {
case CV_BLUR:
blur (mat, mat, Size (filter->kernelwidth, filter->kernelheight),
case GST_SMOOTH_BLUR:
blur (img, img, Size (filter->kernelwidth, filter->kernelheight),
Point (-1, -1));
break;
case CV_GAUSSIAN:
GaussianBlur (mat, mat, Size (filter->kernelwidth, filter->kernelheight),
case GST_SMOOTH_GAUSSIAN:
GaussianBlur (img, img, Size (filter->kernelwidth, filter->kernelheight),
filter->colorsigma, filter->colorsigma);
break;
case CV_MEDIAN:
medianBlur (mat, mat, filter->kernelwidth);
case GST_SMOOTH_MEDIAN:
medianBlur (img, img, filter->kernelwidth);
break;
case CV_BILATERAL:
bilateralFilter (mat, mat, -1, filter->colorsigma, 0.0);
case GST_SMOOTH_BILATERAL:
bilateralFilter (img, img, -1, filter->colorsigma, 0.0);
break;
default:
break;
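
The crop above (img = img (mat_rect)) works in place because the ROI operator returns a view that shares pixels with the full frame, so the blurs in this switch write straight into the buffer GStreamer handed in. A sketch of that aliasing (not from the commit; sizes assumed):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <cassert>

int main ()
{
  cv::Mat frame (240, 320, CV_8UC3, cv::Scalar::all (200));
  cv::Mat roi = frame (cv::Rect (40, 40, 100, 100));  // view, no copy
  cv::blur (roi, roi, cv::Size (9, 9));               // writes into frame
  assert (roi.data == frame.ptr (40) + 40 * 3);       // same buffer
  return 0;
}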

View file

@ -60,9 +60,6 @@
#include "gstcvsobel.h"
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/imgproc/imgproc_c.h>
#endif
GST_DEBUG_CATEGORY_STATIC (gst_cv_sobel_debug);
#define GST_CAT_DEFAULT gst_cv_sobel_debug
@ -107,10 +104,10 @@ static void gst_cv_sobel_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static GstFlowReturn gst_cv_sobel_transform (GstOpencvVideoFilter * filter,
GstBuffer * buf, IplImage * img, GstBuffer * outbuf, IplImage * outimg);
GstBuffer * buf, cv::Mat img, GstBuffer * outbuf, cv::Mat outimg);
static gboolean gst_cv_sobel_set_caps (GstOpencvVideoFilter * transform,
gint in_width, gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels);
gint in_width, gint in_height, int in_cv_type,
gint out_width, gint out_height, int out_cv_type);
/* Clean up */
static void
@ -118,10 +115,8 @@ gst_cv_sobel_finalize (GObject * obj)
{
GstCvSobel *filter = GST_CV_SOBEL (obj);
if (filter->cvSobel != NULL) {
cvReleaseImage (&filter->cvGray);
cvReleaseImage (&filter->cvSobel);
}
filter->cvGray.release ();
filter->cvSobel.release ();
G_OBJECT_CLASS (gst_cv_sobel_parent_class)->finalize (obj);
}
@ -189,20 +184,13 @@ gst_cv_sobel_init (GstCvSobel * filter)
/* this function handles the link with other elements */
static gboolean
gst_cv_sobel_set_caps (GstOpencvVideoFilter * transform,
gint in_width, gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels)
gint in_width, gint in_height, int in_cv_type,
gint out_width, gint out_height, int out_cv_type)
{
GstCvSobel *filter = GST_CV_SOBEL (transform);
if (filter->cvSobel != NULL) {
cvReleaseImage (&filter->cvGray);
cvReleaseImage (&filter->cvSobel);
}
filter->cvGray =
cvCreateImage (cvSize (in_width, in_height), IPL_DEPTH_8U, 1);
filter->cvSobel =
cvCreateImage (cvSize (out_width, out_height), IPL_DEPTH_8U, 1);
filter->cvGray.create (cv::Size (in_width, in_height), CV_8UC1);
filter->cvSobel.create (cv::Size (out_width, out_height), CV_8UC1);
return TRUE;
}
@ -265,19 +253,19 @@ gst_cv_sobel_get_property (GObject * object, guint prop_id,
static GstFlowReturn
gst_cv_sobel_transform (GstOpencvVideoFilter * base, GstBuffer * buf,
IplImage * img, GstBuffer * outbuf, IplImage * outimg)
cv::Mat img, GstBuffer * outbuf, cv::Mat outimg)
{
GstCvSobel *filter = GST_CV_SOBEL (base);
cvCvtColor (img, filter->cvGray, CV_RGB2GRAY);
cvSobel (filter->cvGray, filter->cvSobel, filter->x_order, filter->y_order,
filter->aperture_size);
cv::cvtColor (img, filter->cvGray, cv::COLOR_RGB2GRAY);
cv::Sobel (filter->cvGray, filter->cvSobel, filter->cvGray.depth (),
filter->x_order, filter->y_order, filter->aperture_size);
cvZero (outimg);
outimg.setTo (cv::Scalar::all (0));
if (filter->mask) {
cvCopy (img, outimg, filter->cvSobel);
img.copyTo (outimg, filter->cvSobel);
} else {
cvCvtColor (filter->cvSobel, outimg, CV_GRAY2RGB);
cv::cvtColor (filter->cvSobel, outimg, cv::COLOR_GRAY2RGB);
}
return GST_FLOW_OK;

View file

@ -72,8 +72,8 @@ struct _GstCvSobel
gint aperture_size;
gboolean mask;
IplImage *cvGray;
IplImage *cvSobel;
cv::Mat cvGray;
cv::Mat cvSobel;
};
struct _GstCvSobelClass

View file

@ -1,6 +1,6 @@
/*
* GStreamer
* Copyright (C) 2016 Prassel S.r.l
* Copyright (C) 2016 - 2018 Prassel S.r.l
* Author: Nicola Murino <nicola.murino@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -151,26 +151,19 @@ static GstCaps *gst_dewarp_transform_caps (GstBaseTransform * trans,
GstPadDirection direction, GstCaps * caps, GstCaps * filter_caps);
static GstFlowReturn gst_dewarp_transform_frame (GstOpencvVideoFilter * btrans,
GstBuffer * buffer, IplImage * img, GstBuffer * outbuf, IplImage * outimg);
GstBuffer * buffer, cv::Mat img, GstBuffer * outbuf, cv::Mat outimg);
static gboolean gst_dewarp_set_caps (GstOpencvVideoFilter * filter,
gint in_width, gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels);
gint in_width, gint in_height, int in_cv_type,
gint out_width, gint out_height, int out_cv_type);
static void
gst_dewarp_finalize (GObject * obj)
{
GstDewarp *filter = GST_DEWARP (obj);
if (filter->map_x) {
filter->map_x->release ();
delete filter->map_x;
}
if (filter->map_y) {
filter->map_y->release ();
delete filter->map_y;
}
filter->map_x.release ();
filter->map_y.release ();
G_OBJECT_CLASS (gst_dewarp_parent_class)->finalize (obj);
}
@ -285,9 +278,6 @@ gst_dewarp_init (GstDewarp * filter)
filter->out_height = 0;
filter->need_map_update = TRUE;
filter->map_x = new cv::Mat;
filter->map_y = new cv::Mat;
gst_opencv_video_filter_set_in_place (GST_OPENCV_VIDEO_FILTER_CAST (filter),
FALSE);
}
@ -463,8 +453,8 @@ gst_dewarp_update_map (GstDewarp * filter)
cx = filter->x_center * filter->in_width;
cy = filter->y_center * filter->in_height;
cv::Size destSize (out_width, out_height);
filter->map_x->create (destSize, CV_32FC1);
filter->map_y->create (destSize, CV_32FC1);
filter->map_x.create (destSize, CV_32FC1);
filter->map_y.create (destSize, CV_32FC1);
for (y = 0; y < out_height; y++) {
for (x = 0; x < out_width; x++) {
@ -472,8 +462,8 @@ gst_dewarp_update_map (GstDewarp * filter)
float theta = ((float) (x) / (float) (out_width)) * 2.0 * G_PI;
float xs = cx + r * sin (theta) * filter->remap_correction_x;
float ys = cy + r * cos (theta) * filter->remap_correction_y;
filter->map_x->at < float >(y, x) = xs;
filter->map_y->at < float >(y, x) = ys;
filter->map_x.at < float >(y, x) = xs;
filter->map_y.at < float >(y, x) = ys;
}
}
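
Each output pixel is traced back to a source pixel on a circle: theta sweeps the panorama width, r the radial band between inner and outer radius. A compilable condensation of the map construction plus the remap it feeds (center and radii are assumptions, and r is linearly interpolated here; the element's exact expression for r is elided from the hunk above):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <cmath>

int main ()
{
  const int out_w = 720, out_h = 160;
  const float cx = 320.0f, cy = 240.0f;        // fisheye center, assumed
  const float r_in = 60.0f, r_out = 220.0f;    // radii, assumed
  cv::Mat map_x (out_h, out_w, CV_32FC1), map_y (out_h, out_w, CV_32FC1);
  for (int y = 0; y < out_h; y++) {
    for (int x = 0; x < out_w; x++) {
      float r = r_in + (r_out - r_in) * (float) y / (float) out_h;
      float theta = ((float) x / (float) out_w) * 2.0f * (float) CV_PI;
      map_x.at<float> (y, x) = cx + r * std::sin (theta);
      map_y.at<float> (y, x) = cy + r * std::cos (theta);
    }
  }
  cv::Mat fisheye (480, 640, CV_8UC3, cv::Scalar::all (64)), panorama;
  cv::remap (fisheye, panorama, map_x, map_y, cv::INTER_LINEAR);
  return 0;
}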
@ -600,8 +590,8 @@ gst_dewarp_transform_caps (GstBaseTransform * trans,
static gboolean
gst_dewarp_set_caps (GstOpencvVideoFilter * filter,
gint in_width, gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels)
gint in_width, gint in_height, int in_cv_type,
gint out_width, gint out_height, int out_cv_type)
{
GstDewarp *dewarp = GST_DEWARP (filter);
@ -625,17 +615,17 @@ gst_dewarp_set_caps (GstOpencvVideoFilter * filter,
static GstFlowReturn
gst_dewarp_transform_frame (GstOpencvVideoFilter * btrans, GstBuffer * buffer,
IplImage * img, GstBuffer * outbuf, IplImage * outimg)
cv::Mat img, GstBuffer * outbuf, cv::Mat outimg)
{
GstDewarp *filter = GST_DEWARP (btrans);
GstFlowReturn ret;
GST_OBJECT_LOCK (filter);
if (img->width == filter->in_width
&& img->height == filter->in_height
&& outimg->width == filter->out_width
&& outimg->height == filter->out_height) {
if (img.size ().width == filter->in_width
&& img.size ().height == filter->in_height
&& outimg.size ().width == filter->out_width
&& outimg.size ().height == filter->out_height) {
cv::Mat fisheye_image, dewarped_image;
int inter_mode;
@ -662,11 +652,11 @@ gst_dewarp_transform_frame (GstOpencvVideoFilter * btrans, GstBuffer * buffer,
break;
}
fisheye_image = cv::cvarrToMat (img, false);
dewarped_image = cv::cvarrToMat (outimg, false);
fisheye_image = img;
dewarped_image = outimg;
if (filter->display_mode == GST_DEWARP_DISPLAY_PANORAMA) {
cv::remap (fisheye_image, dewarped_image, *filter->map_x, *filter->map_y,
cv::remap (fisheye_image, dewarped_image, filter->map_x, filter->map_y,
inter_mode);
} else if (filter->display_mode == GST_DEWARP_DISPLAY_DOUBLE_PANORAMA) {
cv::Mat view1, view2, panorama_image, concatenated;
@ -675,7 +665,7 @@ gst_dewarp_transform_frame (GstOpencvVideoFilter * btrans, GstBuffer * buffer,
panorama_height = filter->out_height / 2;
cv::Size panoramaSize (panorama_width, panorama_height);
panorama_image.create (panoramaSize, fisheye_image.type ());
cv::remap (fisheye_image, panorama_image, *filter->map_x, *filter->map_y,
cv::remap (fisheye_image, panorama_image, filter->map_x, filter->map_y,
inter_mode);
view1 =
panorama_image (cv::Rect (0, 0, filter->out_width, panorama_height));
@ -695,7 +685,7 @@ gst_dewarp_transform_frame (GstOpencvVideoFilter * btrans, GstBuffer * buffer,
view_height = filter->out_height / 2;
cv::Size panoramaSize (panorama_width, panorama_height);
panorama_image.create (panoramaSize, fisheye_image.type ());
cv::remap (fisheye_image, panorama_image, *filter->map_x, *filter->map_y,
cv::remap (fisheye_image, panorama_image, filter->map_x, filter->map_y,
inter_mode);
view1 = panorama_image (cv::Rect (0, 0, view_width, view_height));
view2 =

View file

@ -1,6 +1,6 @@
/*
* GStreamer
* Copyright (C) 2016 Prassel S.r.l
* Copyright (C) 2016 - 2018 Prassel S.r.l
* Author: Nicola Murino <nicola.murino@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -47,11 +47,7 @@
#include <gst/gst.h>
#include <gst/opencv/gstopencvvideofilter.h>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/imgproc/imgproc_c.h>
#endif
G_BEGIN_DECLS
/* #defines don't like whitespacey bits */
@ -84,8 +80,8 @@ enum _GstDewarpInterpolationMode {
struct _GstDewarp
{
GstOpencvVideoFilter element;
cv::Mat *map_x;
cv::Mat *map_y;
cv::Mat map_x;
cv::Mat map_y;
gdouble x_center;
gdouble y_center;
gdouble inner_radius;

View file

@ -100,7 +100,7 @@
* <refsect2>
* <title>Example launch line</title>
* |[
* gst-launch-1.0 videotestsrc ! video/x-raw,width=320,height=240 ! disp0.sink_right videotestsrc ! video/x-raw,width=320,height=240 ! disp0.sink_left disparity name=disp0 ! videoconvert ! ximagesink
* gst-launch-1.0 videotestsrc ! video/x-raw,width=320,height=240 ! videoconvert ! disp0.sink_right videotestsrc ! video/x-raw,width=320,height=240 ! videoconvert ! disp0.sink_left disparity name=disp0 ! videoconvert ! ximagesink
* ]|
* Another example, with two png files representing a classical stereo matching,
* downloadable from http://vision.middlebury.edu/stereo/submit/tsukuba/im4.png and
@ -121,9 +121,6 @@ gst-launch-1.0 multifilesrc location=~/im3.png ! pngdec ! videoconvert ! di
#include "gstdisparity.h"
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/imgproc/imgproc_c.h>
#endif
GST_DEBUG_CATEGORY_STATIC (gst_disparity_debug);
#define GST_CAT_DEFAULT gst_disparity_debug
@ -198,14 +195,12 @@ static GstFlowReturn gst_disparity_chain_right (GstPad * pad,
GstObject * parent, GstBuffer * buffer);
static GstFlowReturn gst_disparity_chain_left (GstPad * pad, GstObject * parent,
GstBuffer * buffer);
static void gst_disparity_release_all_pointers (GstDisparity * filter);
static void initialise_disparity (GstDisparity * fs, int width, int height,
int nchannels);
static int initialise_sbm (GstDisparity * filter);
static int run_sbm_iteration (GstDisparity * filter);
static int run_sgbm_iteration (GstDisparity * filter);
static int finalise_sbm (GstDisparity * filter);
/* initialize the disparity's class */
static void
@ -437,27 +432,25 @@ gst_disparity_handle_query (GstPad * pad, GstObject * parent, GstQuery * query)
return ret;
}
static void
gst_disparity_release_all_pointers (GstDisparity * filter)
{
cvReleaseImage (&filter->cvRGB_right);
cvReleaseImage (&filter->cvRGB_left);
cvReleaseImage (&filter->cvGray_depth_map1);
cvReleaseImage (&filter->cvGray_right);
cvReleaseImage (&filter->cvGray_left);
cvReleaseImage (&filter->cvGray_depth_map2);
cvReleaseImage (&filter->cvGray_depth_map1_2);
finalise_sbm (filter);
}
static void
gst_disparity_finalize (GObject * object)
{
GstDisparity *filter;
filter = GST_DISPARITY (object);
gst_disparity_release_all_pointers (filter);
filter->cvRGB_right.release ();
filter->cvRGB_left.release ();
filter->cvGray_right.release ();
filter->cvGray_left.release ();
filter->cvGray_depth_map1.release ();
filter->cvGray_depth_map2.release ();
filter->cvGray_depth_map1_2.release ();
filter->img_right_as_cvMat_gray.release ();
filter->img_left_as_cvMat_gray.release ();
filter->depth_map_as_cvMat.release ();
filter->sbm.release ();
filter->sgbm.release ();
gst_caps_replace (&filter->caps, NULL);
@ -495,8 +488,8 @@ gst_disparity_chain_left (GstPad * pad, GstObject * parent, GstBuffer * buffer)
if (!gst_buffer_map (buffer, &info, (GstMapFlags) GST_MAP_READWRITE)) {
return GST_FLOW_ERROR;
}
if (fs->cvRGB_left)
fs->cvRGB_left->imageData = (char *) info.data;
fs->cvRGB_left.data = (unsigned char *) info.data;
fs->cvRGB_left.datastart = (unsigned char *) info.data;
GST_DEBUG_OBJECT (pad, "signalled right");
g_cond_signal (&fs->cond);
@ -532,9 +525,9 @@ gst_disparity_chain_right (GstPad * pad, GstObject * parent, GstBuffer * buffer)
g_mutex_unlock (&fs->lock);
return GST_FLOW_ERROR;
}
if (fs->cvRGB_right)
fs->cvRGB_right->imageData = (char *) info.data;
fs->cvRGB_right.data = (unsigned char *) info.data;
fs->cvRGB_right.datastart = (unsigned char *) info.data;
/* Here do the business */
GST_INFO_OBJECT (pad,
@ -559,24 +552,24 @@ gst_disparity_chain_right (GstPad * pad, GstObject * parent, GstBuffer * buffer)
interpolation and speckle filtering) ""
*/
if (METHOD_SGBM == fs->method) {
cvCvtColor (fs->cvRGB_left, fs->cvGray_left, CV_RGB2GRAY);
cvCvtColor (fs->cvRGB_right, fs->cvGray_right, CV_RGB2GRAY);
cvtColor (fs->cvRGB_left, fs->cvGray_left, COLOR_RGB2GRAY);
cvtColor (fs->cvRGB_right, fs->cvGray_right, COLOR_RGB2GRAY);
run_sgbm_iteration (fs);
cvNormalize (fs->cvGray_depth_map1, fs->cvGray_depth_map2, 0, 255,
CV_MINMAX, NULL);
cvCvtColor (fs->cvGray_depth_map2, fs->cvRGB_right, CV_GRAY2RGB);
normalize (fs->cvGray_depth_map1, fs->cvGray_depth_map2, 0, 255,
NORM_MINMAX, fs->cvGray_depth_map2.type ());
cvtColor (fs->cvGray_depth_map2, fs->cvRGB_right, COLOR_GRAY2RGB);
}
/* Algorithm 1 is the OpenCV Stereo Block Matching, similar to the one
developed by Kurt Konolige [A] and that works by using small Sum-of-absolute-
differences (SAD) window. See the comments on top of the file.
*/
else if (METHOD_SBM == fs->method) {
cvCvtColor (fs->cvRGB_left, fs->cvGray_left, CV_RGB2GRAY);
cvCvtColor (fs->cvRGB_right, fs->cvGray_right, CV_RGB2GRAY);
cvtColor (fs->cvRGB_left, fs->cvGray_left, COLOR_RGB2GRAY);
cvtColor (fs->cvRGB_right, fs->cvGray_right, COLOR_RGB2GRAY);
run_sbm_iteration (fs);
cvNormalize (fs->cvGray_depth_map1, fs->cvGray_depth_map2, 0, 255,
CV_MINMAX, NULL);
cvCvtColor (fs->cvGray_depth_map2, fs->cvRGB_right, CV_GRAY2RGB);
normalize (fs->cvGray_depth_map1, fs->cvGray_depth_map2, 0, 255,
NORM_MINMAX, fs->cvGray_depth_map2.type ());
cvtColor (fs->cvGray_depth_map2, fs->cvRGB_right, COLOR_GRAY2RGB);
}
@ -612,40 +605,37 @@ gst_disparity_plugin_init (GstPlugin * disparity)
static void
initialise_disparity (GstDisparity * fs, int width, int height, int nchannels)
{
int cv_type = CV_8UC3;
fs->width = width;
fs->height = height;
fs->actualChannels = nchannels;
fs->imgSize = cvSize (fs->width, fs->height);
if (fs->cvRGB_right)
gst_disparity_release_all_pointers (fs);
fs->imgSize = Size (fs->width, fs->height);
if (fs->actualChannels == 1) {
cv_type = CV_8UC1;
} else if (fs->actualChannels == 2) {
cv_type = CV_8UC2;
}
fs->cvRGB_right = cvCreateImageHeader (fs->imgSize, IPL_DEPTH_8U,
fs->actualChannels);
fs->cvRGB_left = cvCreateImageHeader (fs->imgSize, IPL_DEPTH_8U,
fs->actualChannels);
fs->cvGray_right = cvCreateImage (fs->imgSize, IPL_DEPTH_8U, 1);
fs->cvGray_left = cvCreateImage (fs->imgSize, IPL_DEPTH_8U, 1);
fs->cvRGB_right.create (fs->imgSize, cv_type);
fs->cvRGB_left.create (fs->imgSize, cv_type);
fs->cvGray_right.create (fs->imgSize, CV_8UC1);
fs->cvGray_left.create (fs->imgSize, CV_8UC1);
fs->cvGray_depth_map1 = cvCreateImage (fs->imgSize, IPL_DEPTH_16S, 1);
fs->cvGray_depth_map2 = cvCreateImage (fs->imgSize, IPL_DEPTH_8U, 1);
fs->cvGray_depth_map1_2 = cvCreateImage (fs->imgSize, IPL_DEPTH_16S, 1);
fs->cvGray_depth_map1.create (fs->imgSize, CV_16SC1);
fs->cvGray_depth_map2.create (fs->imgSize, CV_8UC1);
fs->cvGray_depth_map1_2.create (fs->imgSize, CV_16SC1);
/* Stereo Block Matching methods */
if ((NULL != fs->cvRGB_right) && (NULL != fs->cvRGB_left)
&& (NULL != fs->cvGray_depth_map2))
initialise_sbm (fs);
initialise_sbm (fs);
}
int
initialise_sbm (GstDisparity * filter)
{
filter->img_right_as_cvMat_gray =
(void *) new Mat (cvarrToMat (filter->cvGray_right, false));
filter->img_left_as_cvMat_gray =
(void *) new Mat (cvarrToMat (filter->cvGray_left, false));
filter->depth_map_as_cvMat =
(void *) new Mat (cvarrToMat (filter->cvGray_depth_map1, false));
filter->img_right_as_cvMat_gray = Mat (filter->cvGray_right);
filter->img_left_as_cvMat_gray = Mat (filter->cvGray_left);
filter->depth_map_as_cvMat = Mat (filter->cvGray_depth_map1);
filter->sbm = StereoBM::create ();
filter->sgbm = StereoSGBM::create (1, 64, 3);
@ -679,10 +669,8 @@ initialise_sbm (GstDisparity * filter)
int
run_sbm_iteration (GstDisparity * filter)
{
((StereoBM *) filter->sbm)->
compute (*((Mat *) filter->img_left_as_cvMat_gray),
*((Mat *) filter->img_right_as_cvMat_gray),
*((Mat *) filter->depth_map_as_cvMat));
((StereoBM *) filter->sbm)->compute (filter->img_left_as_cvMat_gray,
filter->img_right_as_cvMat_gray, filter->depth_map_as_cvMat);
return (0);
}
@ -690,23 +678,8 @@ run_sbm_iteration (GstDisparity * filter)
int
run_sgbm_iteration (GstDisparity * filter)
{
((StereoSGBM *) filter->sgbm)->
compute (*((Mat *) filter->img_left_as_cvMat_gray),
*((Mat *) filter->img_right_as_cvMat_gray),
*((Mat *) filter->depth_map_as_cvMat));
return (0);
}
int
finalise_sbm (GstDisparity * filter)
{
delete (Mat *) filter->depth_map_as_cvMat;
delete (Mat *) filter->img_left_as_cvMat_gray;
delete (Mat *) filter->img_right_as_cvMat_gray;
filter->sbm.release ();
filter->sgbm.release ();
((StereoSGBM *) filter->sgbm)->compute (filter->img_left_as_cvMat_gray,
filter->img_right_as_cvMat_gray, filter->depth_map_as_cvMat);
return (0);
}
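
With cv::Ptr matchers and plain cv::Mat members, the separate finalise_sbm teardown is gone: Ptr drops the matchers and Mat drops the maps on their own. A minimal sketch of the block-matching call path (image sizes arbitrary; real input needs a rectified stereo pair):

#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>

int main ()
{
  cv::Mat left (480, 640, CV_8UC1, cv::Scalar (0));
  cv::Mat right (480, 640, CV_8UC1, cv::Scalar (0));
  cv::Mat disparity;                       // filled as CV_16S by compute ()
  cv::Ptr<cv::StereoBM> sbm = cv::StereoBM::create ();
  sbm->compute (left, right, disparity);
  cv::Mat disp8;                           // scaled to 0..255 for display,
  cv::normalize (disparity, disp8, 0, 255, cv::NORM_MINMAX, CV_8UC1);
  return 0;                                // as the chain function does
}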

View file

@ -49,9 +49,6 @@
#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/core/types_c.h>
#endif
G_BEGIN_DECLS
/* #defines don't like whitespacey bits */
@ -87,18 +84,18 @@ struct _GstDisparity
GCond cond;
gboolean flushing;
CvSize imgSize;
IplImage *cvRGB_right;
IplImage *cvRGB_left;
IplImage *cvGray_right;
IplImage *cvGray_left;
IplImage *cvGray_depth_map1; /*IPL_DEPTH_16S */
IplImage *cvGray_depth_map2; /*IPL_DEPTH_8U */
IplImage *cvGray_depth_map1_2; /*IPL_DEPTH_16S */
cv::Size imgSize;
cv::Mat cvRGB_right;
cv::Mat cvRGB_left;
cv::Mat cvGray_right;
cv::Mat cvGray_left;
cv::Mat cvGray_depth_map1; /*IPL_DEPTH_16S */
cv::Mat cvGray_depth_map2; /*IPL_DEPTH_8U */
cv::Mat cvGray_depth_map1_2; /*IPL_DEPTH_16S */
void *img_right_as_cvMat_gray; /* cv::Mat */
void *img_left_as_cvMat_gray; /* cv::Mat */
void *depth_map_as_cvMat; /* cv::Mat */
cv::Mat img_right_as_cvMat_gray;
cv::Mat img_left_as_cvMat_gray;
cv::Mat depth_map_as_cvMat;
cv::Ptr<cv::StereoBM> sbm; /* cv::StereoBM */
cv::Ptr<cv::StereoSGBM> sgbm; /* cv::StereoSGBM */

View file

@ -51,7 +51,7 @@
* <refsect2>
* <title>Example launch line</title>
* |[
* gst-launch-1.0 videotestsrc ! decodebin ! videoconvert ! edgedetect ! videoconvert ! xvimagesink
* gst-launch-1.0 videotestsrc ! videoconvert ! edgedetect ! videoconvert ! xvimagesink
* ]|
* </refsect2>
*/
@ -62,9 +62,6 @@
#include "gstedgedetect.h"
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/imgproc/imgproc_c.h>
#endif
GST_DEBUG_CATEGORY_STATIC (gst_edge_detect_debug);
#define GST_CAT_DEFAULT gst_edge_detect_debug
@ -109,10 +106,10 @@ static void gst_edge_detect_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static GstFlowReturn gst_edge_detect_transform (GstOpencvVideoFilter * filter,
GstBuffer * buf, IplImage * img, GstBuffer * outbuf, IplImage * outimg);
GstBuffer * buf, cv::Mat img, GstBuffer * outbuf, cv::Mat outimg);
static gboolean gst_edge_detect_set_caps (GstOpencvVideoFilter * transform,
gint in_width, gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels);
gint in_width, gint in_height, int in_cv_type,
gint out_width, gint out_height, int out_cv_type);
/* Clean up */
static void
@ -120,10 +117,8 @@ gst_edge_detect_finalize (GObject * obj)
{
GstEdgeDetect *filter = GST_EDGE_DETECT (obj);
if (filter->cvEdge != NULL) {
cvReleaseImage (&filter->cvGray);
cvReleaseImage (&filter->cvEdge);
}
filter->cvGray.release ();
filter->cvEdge.release ();
G_OBJECT_CLASS (gst_edge_detect_parent_class)->finalize (obj);
}
@ -245,39 +240,32 @@ gst_edge_detect_get_property (GObject * object, guint prop_id,
/* this function handles the link with other elements */
static gboolean
gst_edge_detect_set_caps (GstOpencvVideoFilter * transform,
gint in_width, gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels)
gint in_width, gint in_height, int in_cv_type,
gint out_width, gint out_height, int out_cv_type)
{
GstEdgeDetect *filter = GST_EDGE_DETECT (transform);
if (filter->cvEdge != NULL) {
cvReleaseImage (&filter->cvGray);
cvReleaseImage (&filter->cvEdge);
}
filter->cvGray =
cvCreateImage (cvSize (in_width, in_height), IPL_DEPTH_8U, 1);
filter->cvEdge =
cvCreateImage (cvSize (in_width, in_height), IPL_DEPTH_8U, 1);
filter->cvGray.create (cv::Size (in_width, in_height), CV_8UC1);
filter->cvEdge.create (cv::Size (in_width, in_height), CV_8UC1);
return TRUE;
}
static GstFlowReturn
gst_edge_detect_transform (GstOpencvVideoFilter * base, GstBuffer * buf,
IplImage * img, GstBuffer * outbuf, IplImage * outimg)
cv::Mat img, GstBuffer * outbuf, cv::Mat outimg)
{
GstEdgeDetect *filter = GST_EDGE_DETECT (base);
cvCvtColor (img, filter->cvGray, CV_RGB2GRAY);
cvCanny (filter->cvGray, filter->cvEdge, filter->threshold1,
cv::cvtColor (img, filter->cvGray, cv::COLOR_RGB2GRAY);
cv::Canny (filter->cvGray, filter->cvEdge, filter->threshold1,
filter->threshold2, filter->aperture);
cvZero (outimg);
outimg.setTo (cv::Scalar::all (0));
if (filter->mask) {
cvCopy (img, outimg, filter->cvEdge);
img.copyTo (outimg, filter->cvEdge);
} else {
cvCvtColor (filter->cvEdge, outimg, CV_GRAY2RGB);
cv::cvtColor (filter->cvEdge, outimg, cv::COLOR_GRAY2RGB);
}
return GST_FLOW_OK;

View file

@ -75,8 +75,8 @@ struct _GstEdgeDetect
int threshold2;
int aperture;
IplImage *cvEdge;
IplImage *cvGray;
cv::Mat cvEdge;
cv::Mat cvGray;
};
struct _GstEdgeDetectClass

View file

@ -65,9 +65,6 @@
#include "gstfaceblur.h"
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/imgproc/imgproc_c.h>
#endif
GST_DEBUG_CATEGORY_STATIC (gst_face_blur_debug);
#define GST_CAT_DEFAULT gst_face_blur_debug
@ -156,10 +153,10 @@ static void gst_face_blur_get_property (GObject * object, guint prop_id,
static gboolean gst_face_blur_set_caps (GstOpencvVideoFilter * transform,
gint in_width, gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels);
gint in_width, gint in_height, int in_cv_type,
gint out_width, gint out_height, int out_cv_type);
static GstFlowReturn gst_face_blur_transform_ip (GstOpencvVideoFilter *
transform, GstBuffer * buffer, IplImage * img);
transform, GstBuffer * buffer, Mat img);
static CascadeClassifier *gst_face_blur_load_profile (GstFaceBlur *
filter, gchar * profile);
@ -170,8 +167,7 @@ gst_face_blur_finalize (GObject * obj)
{
GstFaceBlur *filter = GST_FACE_BLUR (obj);
if (filter->cvGray)
cvReleaseImage (&filter->cvGray);
filter->cvGray.release ();
if (filter->cvCascade)
delete filter->cvCascade;
@ -328,22 +324,19 @@ gst_face_blur_get_property (GObject * object, guint prop_id,
static gboolean
gst_face_blur_set_caps (GstOpencvVideoFilter * transform,
gint in_width, gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels)
gint in_width, gint in_height, int in_cv_type,
gint out_width, gint out_height, int out_cv_type)
{
GstFaceBlur *filter = GST_FACE_BLUR (transform);
if (filter->cvGray)
cvReleaseImage (&filter->cvGray);
filter->cvGray =
cvCreateImage (cvSize (in_width, in_height), IPL_DEPTH_8U, 1);
filter->cvGray.create (Size (in_width, in_height), CV_8UC1);
return TRUE;
}
static GstFlowReturn
gst_face_blur_transform_ip (GstOpencvVideoFilter * transform,
GstBuffer * buffer, IplImage * img)
GstBuffer * buffer, Mat img)
{
GstFaceBlur *filter = GST_FACE_BLUR (transform);
vector < Rect > faces;
@ -360,19 +353,17 @@ gst_face_blur_transform_ip (GstOpencvVideoFilter * transform,
return GST_FLOW_OK;
}
cvCvtColor (img, filter->cvGray, CV_RGB2GRAY);
cvtColor (img, filter->cvGray, COLOR_RGB2GRAY);
Mat image = cvarrToMat (filter->cvGray);
filter->cvCascade->detectMultiScale (image, faces, filter->scale_factor,
filter->min_neighbors, filter->flags,
cvSize (filter->min_size_width, filter->min_size_height), cvSize (0, 0));
filter->cvCascade->detectMultiScale (filter->cvGray, faces,
filter->scale_factor, filter->min_neighbors, filter->flags,
Size (filter->min_size_width, filter->min_size_height), Size (0, 0));
if (!faces.empty ()) {
for (i = 0; i < faces.size (); ++i) {
Rect *r = &faces[i];
Mat imag = cvarrToMat (img);
Mat roi (imag, Rect (r->x, r->y, r->width, r->height));
Mat roi (img, Rect (r->x, r->y, r->width, r->height));
blur (roi, roi, Size (11, 11));
GaussianBlur (roi, roi, Size (11, 11), 0, 0);
}
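
The ported face blur is likewise a short chain: cvtColor, CascadeClassifier::detectMultiScale, then an in-place GaussianBlur on each face ROI (the Mat ROI aliases the frame, so no copy-back is needed). A standalone sketch under the same API; the cascade path and detector parameters are placeholders.

/* Standalone sketch of the ported faceblur path; cascade file and
 * parameters are placeholders. */
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/objdetect.hpp>
#include <vector>

int main ()
{
  cv::Mat img = cv::imread ("in.png");
  cv::CascadeClassifier cascade ("haarcascade_frontalface_default.xml");
  if (img.empty () || cascade.empty ())
    return 1;

  cv::Mat gray;
  cv::cvtColor (img, gray, cv::COLOR_BGR2GRAY);

  std::vector<cv::Rect> faces;
  cascade.detectMultiScale (gray, faces, 1.1 /* scale factor */,
      3 /* min neighbors */, 0 /* flags */, cv::Size (30, 30), cv::Size (0, 0));

  for (size_t i = 0; i < faces.size (); ++i) {
    cv::Mat roi (img, faces[i]);        /* a view into img, not a copy */
    cv::GaussianBlur (roi, roi, cv::Size (11, 11), 0, 0);
  }
  return cv::imwrite ("out.png", img) ? 0 : 1;
}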

View file

@ -82,7 +82,7 @@ struct _GstFaceBlur
gint min_size_width;
gint min_size_height;
IplImage *cvGray;
cv::Mat cvGray;
cv::CascadeClassifier *cvCascade;
};

View file

@ -5,6 +5,7 @@
* Copyright (C) 2008 Michael Sheldon <mike@mikeasoft.com>
* Copyright (C) 2011 Stefan Sauer <ensonic@users.sf.net>
* Copyright (C) 2014 Robert Jobbagy <jobbagy.robert@gmail.com>
* Copyright (C) 2018 Nicola Murino <nicola.murino@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -82,9 +83,6 @@ using namespace std;
#include "gstfacedetect.h"
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/imgproc/imgproc_c.h>
#endif
GST_DEBUG_CATEGORY_STATIC (gst_face_detect_debug);
#define GST_CAT_DEFAULT gst_face_detect_debug
@ -236,10 +234,10 @@ static void gst_face_detect_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static gboolean gst_face_detect_set_caps (GstOpencvVideoFilter * transform,
gint in_width, gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels);
gint in_width, gint in_height, int in_cv_type,
gint out_width, gint out_height, int out_cv_type);
static GstFlowReturn gst_face_detect_transform_ip (GstOpencvVideoFilter * base,
GstBuffer * buf, IplImage * img);
GstBuffer * buf, Mat img);
static CascadeClassifier *gst_face_detect_load_profile (GstFaceDetect *
filter, gchar * profile);
@ -250,8 +248,7 @@ gst_face_detect_finalize (GObject * obj)
{
GstFaceDetect *filter = GST_FACE_DETECT (obj);
if (filter->cvGray)
cvReleaseImage (&filter->cvGray);
filter->cvGray.release ();
g_free (filter->face_profile);
g_free (filter->nose_profile);
@ -517,18 +514,14 @@ gst_face_detect_get_property (GObject * object, guint prop_id,
/* this function handles the link with other elements */
static gboolean
gst_face_detect_set_caps (GstOpencvVideoFilter * transform, gint in_width,
gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels)
gint in_height, int in_cv_type,
gint out_width, gint out_height, int out_cv_type)
{
GstFaceDetect *filter;
filter = GST_FACE_DETECT (transform);
if (filter->cvGray)
cvReleaseImage (&filter->cvGray);
filter->cvGray = cvCreateImage (cvSize (in_width, in_height), IPL_DEPTH_8U,
1);
filter->cvGray.create (Size (in_width, in_height), CV_8UC1);
return TRUE;
}
@ -561,15 +554,15 @@ gst_face_detect_run_detector (GstFaceDetect * filter,
{
double img_stddev = 0;
if (filter->min_stddev > 0) {
CvScalar mean, stddev;
cvAvgSdv (filter->cvGray, &mean, &stddev, NULL);
Scalar mean, stddev;
meanStdDev (filter->cvGray, mean, stddev);
img_stddev = stddev.val[0];
}
if (img_stddev >= filter->min_stddev) {
Mat roi (cv::cvarrToMat (filter->cvGray), r);
Mat roi (filter->cvGray, r);
detector->detectMultiScale (roi, faces, filter->scale_factor,
filter->min_neighbors, filter->flags, cvSize (min_size_width,
min_size_height), cvSize (0, 0));
filter->min_neighbors, filter->flags, Size (min_size_width,
min_size_height), Size (0, 0));
} else {
GST_LOG_OBJECT (filter,
"Calculated stddev %f lesser than min_stddev %d, detection not performed",
@ -582,7 +575,7 @@ gst_face_detect_run_detector (GstFaceDetect * filter,
*/
static GstFlowReturn
gst_face_detect_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
IplImage * img)
Mat img)
{
GstFaceDetect *filter = GST_FACE_DETECT (base);
@ -597,14 +590,13 @@ gst_face_detect_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
vector < Rect > eyes;
gboolean post_msg = FALSE;
Mat mtxOrg (cv::cvarrToMat (img));
cvCvtColor (img, filter->cvGray, CV_RGB2GRAY);
cvtColor (img, filter->cvGray, COLOR_RGB2GRAY);
gst_face_detect_run_detector (filter, filter->cvFaceDetect,
filter->min_size_width, filter->min_size_height,
Rect (filter->cvGray->origin, filter->cvGray->origin,
filter->cvGray->width, filter->cvGray->height), faces);
Rect (0, 0,
filter->cvGray.size ().width, filter->cvGray.size ().height),
faces);
switch (filter->updates) {
case GST_FACEDETECT_UPDATES_EVERY_FRAME:
@ -716,7 +708,7 @@ gst_face_detect_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
}
if (filter->display) {
CvPoint center;
Point center;
Size axes;
gdouble w, h;
gint cb = 255 - ((i & 3) << 7);
@ -729,7 +721,7 @@ gst_face_detect_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
center.y = cvRound ((r.y + h));
axes.width = w;
axes.height = h * 1.25; /* tweak for face form */
ellipse (mtxOrg, center, axes, 0, 0, 360, Scalar (cr, cg, cb), 3, 8, 0);
ellipse (img, center, axes, 0, 0, 360, Scalar (cr, cg, cb), 3, 8, 0);
if (have_nose) {
Rect sr = nose[0];
@ -740,8 +732,7 @@ gst_face_detect_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
center.y = cvRound ((rny + sr.y + h));
axes.width = w;
axes.height = h * 1.25; /* tweak for nose form */
ellipse (mtxOrg, center, axes, 0, 0, 360, Scalar (cr, cg, cb), 1, 8,
0);
ellipse (img, center, axes, 0, 0, 360, Scalar (cr, cg, cb), 1, 8, 0);
}
if (have_mouth) {
Rect sr = mouth[0];
@ -752,8 +743,7 @@ gst_face_detect_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
center.y = cvRound ((rmy + sr.y + h));
axes.width = w * 1.5; /* tweak for mouth form */
axes.height = h;
ellipse (mtxOrg, center, axes, 0, 0, 360, Scalar (cr, cg, cb), 1, 8,
0);
ellipse (img, center, axes, 0, 0, 360, Scalar (cr, cg, cb), 1, 8, 0);
}
if (have_eyes) {
Rect sr = eyes[0];
@ -764,8 +754,7 @@ gst_face_detect_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
center.y = cvRound ((rey + sr.y + h));
axes.width = w * 1.5; /* tweak for eyes form */
axes.height = h;
ellipse (mtxOrg, center, axes, 0, 0, 360, Scalar (cr, cg, cb), 1, 8,
0);
ellipse (img, center, axes, 0, 0, 360, Scalar (cr, cg, cb), 1, 8, 0);
}
}
gst_buffer_add_video_region_of_interest_meta (buf, "face",
@ -778,7 +767,6 @@ gst_face_detect_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
g_value_unset (&facelist);
gst_element_post_message (GST_ELEMENT (filter), msg);
}
mtxOrg.release ();
}
return GST_FLOW_OK;
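
The substantive changes above are cvAvgSdv becoming cv::meanStdDev for the min-stddev gate, and Rect (0, 0, w, h) replacing the old IplImage origin fields. A standalone sketch of the gate plus detection and the ellipse overlay; the stddev threshold, cascade path and color are placeholders.

/* Standalone sketch: stddev gate, detection over a full-frame ROI,
 * ellipse overlay. Threshold and cascade path are placeholders. */
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/objdetect.hpp>
#include <vector>

int main ()
{
  cv::Mat img = cv::imread ("in.png");
  cv::CascadeClassifier cascade ("haarcascade_frontalface_default.xml");
  if (img.empty () || cascade.empty ())
    return 1;

  cv::Mat gray;
  cv::cvtColor (img, gray, cv::COLOR_BGR2GRAY);

  /* skip detection on near-flat frames, as the element does */
  cv::Scalar mean, stddev;
  cv::meanStdDev (gray, mean, stddev);
  if (stddev.val[0] < 5.0)        /* stand-in for the min-stddev property */
    return 0;

  std::vector<cv::Rect> faces;
  cv::Mat roi (gray, cv::Rect (0, 0, gray.cols, gray.rows));  /* whole frame */
  cascade.detectMultiScale (roi, faces, 1.1, 3, 0, cv::Size (30, 30));

  for (size_t i = 0; i < faces.size (); ++i) {
    cv::Point center (faces[i].x + faces[i].width / 2,
        faces[i].y + faces[i].height / 2);
    cv::Size axes (faces[i].width / 2, (int) (faces[i].height / 2 * 1.25));
    cv::ellipse (img, center, axes, 0, 0, 360, cv::Scalar (0, 255, 0), 3);
  }
  return cv::imwrite ("out.png", img) ? 0 : 1;
}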

View file

@ -5,6 +5,7 @@
* Copyright (C) 2008 Michael Sheldon <mike@mikeasoft.com>
* Copyright (C) 2011 Stefan Sauer <ensonic@users.sf.net>
* Copyright (C) 2011 Robert Jobbagy <jobbagy.robert@gmail.com>
* Copyright (C) 2018 Nicola Murino <nicola.murino@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -50,8 +51,6 @@
#include <gst/gst.h>
#include <gst/opencv/gstopencvvideofilter.h>
#include <opencv2/core.hpp>
#include <opencv2/objdetect.hpp>
G_BEGIN_DECLS
@ -104,7 +103,7 @@ struct _GstFaceDetect
gint min_stddev;
gint updates;
IplImage *cvGray;
cv::Mat cvGray;
cv::CascadeClassifier *cvFaceDetect;
cv::CascadeClassifier *cvNoseDetect;
cv::CascadeClassifier *cvMouthDetect;

View file

@ -86,9 +86,6 @@
#include "gstgrabcut.h"
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/imgproc/imgproc_c.h>
#endif
GST_DEBUG_CATEGORY_STATIC (gst_grabcut_debug);
#define GST_CAT_DEFAULT gst_grabcut_debug
@ -129,23 +126,37 @@ static void gst_grabcut_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static GstFlowReturn gst_grabcut_transform_ip (GstOpencvVideoFilter * filter,
GstBuffer * buf, IplImage * img);
GstBuffer * buf, Mat img);
static gboolean gst_grabcut_set_caps (GstOpencvVideoFilter * filter,
gint in_width, gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels);
gint in_width, gint in_height, int in_cv_type,
gint out_width, gint out_height, int out_cv_type);
static void gst_grabcut_release_all_pointers (GstGrabcut * filter);
//static void gst_grabcut_release_all_pointers (GstGrabcut * filter);
static gboolean gst_grabcut_stop (GstBaseTransform * basesrc);
static void compose_matrix_from_image (CvMat * output, IplImage * input);
static void compose_matrix_from_image (Mat output, Mat input);
static int initialise_grabcut (struct grabcut_params *GC, IplImage * image_c,
CvMat * mask_c);
static int run_grabcut_iteration (struct grabcut_params *GC,
IplImage * image_c, CvMat * mask_c, CvRect * bbox);
static int run_grabcut_iteration2 (struct grabcut_params *GC,
IplImage * image_c, CvMat * mask_c, CvRect * bbox);
static int finalise_grabcut (struct grabcut_params *GC);
static int run_grabcut_iteration (Mat image_c, Mat mask_c, Mat bgdModel,
Mat fgdModel);
static int run_grabcut_iteration2 (Mat image_c, Mat mask_c, Mat bgdModel,
Mat fgdModel, Rect bbox);
/* Clean up */
static void
gst_grabcut_finalize (GObject * obj)
{
GstGrabcut *filter = GST_GRABCUT (obj);
filter->cvRGBin.release ();
filter->cvA.release ();
filter->cvB.release ();
filter->cvC.release ();
filter->cvD.release ();
filter->grabcut_mask.release ();
filter->bgdModel.release ();
filter->fgdModel.release ();
G_OBJECT_CLASS (gst_grabcut_parent_class)->finalize (obj);
}
/* initialize the grabcut's class */
static void
@ -157,10 +168,10 @@ gst_grabcut_class_init (GstGrabcutClass * klass)
(GstOpencvVideoFilterClass *) klass;
GstBaseTransformClass *btrans_class = (GstBaseTransformClass *) klass;
gobject_class->finalize = GST_DEBUG_FUNCPTR (gst_grabcut_finalize);
gobject_class->set_property = gst_grabcut_set_property;
gobject_class->get_property = gst_grabcut_get_property;
btrans_class->stop = gst_grabcut_stop;
btrans_class->passthrough_on_same_caps = TRUE;
cvbasefilter_class->cv_trans_ip_func = gst_grabcut_transform_ip;
@ -247,64 +258,37 @@ gst_grabcut_get_property (GObject * object, guint prop_id,
/* this function handles the link with other elements */
static gboolean
gst_grabcut_set_caps (GstOpencvVideoFilter * filter, gint in_width,
gint in_height, gint in_depth, gint in_channels, gint out_width,
gint out_height, gint out_depth, gint out_channels)
gint in_height, int in_cv_type, gint out_width,
gint out_height, int out_cv_type)
{
GstGrabcut *grabcut = GST_GRABCUT (filter);
CvSize size;
Size size;
size = cvSize (in_width, in_height);
size = Size (in_width, in_height);
/* If cvRGB is already allocated, it means there's a cap modification,
* so release first all the images. */
if (!grabcut->cvRGBin)
gst_grabcut_release_all_pointers (grabcut);
grabcut->cvRGBin = cvCreateImage (size, IPL_DEPTH_8U, 3);
grabcut->cvRGBin.create (size, CV_8UC3);
grabcut->cvA = cvCreateImage (size, IPL_DEPTH_8U, 1);
grabcut->cvB = cvCreateImage (size, IPL_DEPTH_8U, 1);
grabcut->cvC = cvCreateImage (size, IPL_DEPTH_8U, 1);
grabcut->cvD = cvCreateImage (size, IPL_DEPTH_8U, 1);
grabcut->cvA.create (size, CV_8UC1);
grabcut->cvB.create (size, CV_8UC1);
grabcut->cvC.create (size, CV_8UC1);
grabcut->cvD.create (size, CV_8UC1);
grabcut->grabcut_mask = cvCreateMat (size.height, size.width, CV_8UC1);
cvZero (grabcut->grabcut_mask);
initialise_grabcut (&(grabcut->GC), grabcut->cvRGBin, grabcut->grabcut_mask);
grabcut->grabcut_mask = Mat::zeros (size, CV_8UC1);
grabcut->bgdModel = Mat ();
grabcut->fgdModel = Mat ();
//initialise_grabcut (&(grabcut->GC), grabcut->cvRGBin, grabcut->grabcut_mask);
return TRUE;
}
/* Clean up */
static gboolean
gst_grabcut_stop (GstBaseTransform * basesrc)
{
GstGrabcut *filter = GST_GRABCUT (basesrc);
if (filter->cvRGBin != NULL)
gst_grabcut_release_all_pointers (filter);
return TRUE;
}
static void
gst_grabcut_release_all_pointers (GstGrabcut * filter)
{
cvReleaseImage (&filter->cvRGBin);
cvReleaseImage (&filter->cvA);
cvReleaseImage (&filter->cvB);
cvReleaseImage (&filter->cvC);
cvReleaseImage (&filter->cvD);
finalise_grabcut (&(filter->GC));
}
static GstFlowReturn
gst_grabcut_transform_ip (GstOpencvVideoFilter * filter, GstBuffer * buffer,
IplImage * img)
Mat img)
{
GstGrabcut *gc = GST_GRABCUT (filter);
gint alphapixels;
std::vector < Mat > channels (4);
GstVideoRegionOfInterestMeta *meta;
meta = gst_buffer_get_video_region_of_interest_meta (buffer);
@ -318,24 +302,28 @@ gst_grabcut_transform_ip (GstOpencvVideoFilter * filter, GstBuffer * buffer,
}
/* normally input should be RGBA */
cvSplit (img, gc->cvA, gc->cvB, gc->cvC, gc->cvD);
cvCvtColor (img, gc->cvRGBin, CV_BGRA2BGR);
split (img, channels);
gc->cvA = channels.at (0);
gc->cvB = channels.at (1);
gc->cvC = channels.at (2);
gc->cvD = channels.at (3);
cvtColor (img, gc->cvRGBin, COLOR_BGRA2BGR);
compose_matrix_from_image (gc->grabcut_mask, gc->cvD);
/* Pass cvD to grabcut_mask for the graphcut step, but only if the mask
   actually contains something; otherwise the input bbox is what we use */
alphapixels = cvCountNonZero (gc->cvD);
alphapixels = countNonZero (gc->cvD);
if ((0 < alphapixels) && (alphapixels < (gc->width * gc->height))) {
GST_INFO ("running on mask");
run_grabcut_iteration (&(gc->GC), gc->cvRGBin, gc->grabcut_mask, NULL);
run_grabcut_iteration (gc->cvRGBin, gc->grabcut_mask, gc->bgdModel,
gc->fgdModel);
} else {
if ((abs (gc->facepos.width) > 2) && (abs (gc->facepos.height) > 2)) {
GST_INFO ("running on bbox (%d,%d),(%d,%d)", gc->facepos.x, gc->facepos.y,
gc->facepos.width, gc->facepos.height);
run_grabcut_iteration2 (&(gc->GC), gc->cvRGBin, gc->grabcut_mask,
&(gc->facepos));
run_grabcut_iteration2 (gc->cvRGBin, gc->grabcut_mask, gc->bgdModel,
gc->fgdModel, gc->facepos);
} else {
GST_WARNING ("No face info present, skipping frame.");
return GST_FLOW_OK;
@ -345,21 +333,21 @@ gst_grabcut_transform_ip (GstOpencvVideoFilter * filter, GstBuffer * buffer,
/* if we want to display, just overwrite the output */
if (gc->test_mode) {
/* get only FG, PR_FG */
cvAndS (gc->grabcut_mask, cvRealScalar (1), gc->grabcut_mask, NULL);
bitwise_and (gc->grabcut_mask, Scalar (1), gc->grabcut_mask);
/* (saturated) FG, PR_FG --> 255 */
cvConvertScale (gc->grabcut_mask, gc->grabcut_mask, 255.0, 0.0);
gc->grabcut_mask.convertTo (gc->grabcut_mask, -1, 255.0, 0.0);
cvAnd (gc->grabcut_mask, gc->cvA, gc->cvA, NULL);
cvAnd (gc->grabcut_mask, gc->cvB, gc->cvB, NULL);
cvAnd (gc->grabcut_mask, gc->cvC, gc->cvC, NULL);
bitwise_and (gc->grabcut_mask, gc->cvA, gc->cvA);
bitwise_and (gc->grabcut_mask, gc->cvB, gc->cvB);
bitwise_and (gc->grabcut_mask, gc->cvC, gc->cvC);
}
cvMerge (gc->cvA, gc->cvB, gc->cvC, gc->cvD, img);
merge (channels, img);
if (gc->test_mode) {
cvRectangle (img,
cvPoint (gc->facepos.x, gc->facepos.y),
cvPoint (gc->facepos.x + gc->facepos.width,
rectangle (img,
Point (gc->facepos.x, gc->facepos.y),
Point (gc->facepos.x + gc->facepos.width,
gc->facepos.y + gc->facepos.height), CV_RGB (255, 0, 255), 1, 8, 0);
}
@ -385,65 +373,34 @@ gst_grabcut_plugin_init (GstPlugin * plugin)
}
void
compose_matrix_from_image (CvMat * output, IplImage * input)
compose_matrix_from_image (Mat output, Mat input)
{
int x, y;
for (x = 0; x < output->cols; x++) {
for (y = 0; y < output->rows; y++) {
CV_MAT_ELEM (*output, uchar, y, x) =
(cvGetReal2D (input, y, x) <= GC_PR_FGD) ? cvGetReal2D (input, y,
x) : GC_PR_FGD;
for (x = 0; x < output.cols; x++) {
for (y = 0; y < output.rows; y++) {
output.data[output.step[0] * y + x] =
(input.data[input.step[0] * y + x] <=
GC_PR_FGD) ? input.data[input.step[0] * y + x] : GC_PR_FGD;
}
}
}
int
initialise_grabcut (struct grabcut_params *GC, IplImage * image_c,
CvMat * mask_c)
run_grabcut_iteration (Mat image_c, Mat mask_c, Mat bgdModel, Mat fgdModel)
{
GC->image = (void *) new Mat (cvarrToMat (image_c, false)); /* "true" refers to copydata */
GC->mask = (void *) new Mat (cvarrToMat (mask_c, false));
GC->bgdModel = (void *) new Mat (); /* "true" refers to copydata */
GC->fgdModel = (void *) new Mat ();
if (countNonZero (mask_c))
grabCut (image_c, mask_c, Rect (),
bgdModel, fgdModel, 1, GC_INIT_WITH_MASK);
return (0);
}
int
run_grabcut_iteration (struct grabcut_params *GC, IplImage * image_c,
CvMat * mask_c, CvRect * bbox)
run_grabcut_iteration2 (Mat image_c, Mat mask_c, Mat bgdModel, Mat fgdModel,
Rect bbox)
{
((Mat *) GC->image)->data = (uchar *) image_c->imageData;
((Mat *) GC->mask)->data = mask_c->data.ptr;
if (cvCountNonZero (mask_c))
grabCut (*((Mat *) GC->image), *((Mat *) GC->mask), Rect (),
*((Mat *) GC->bgdModel), *((Mat *) GC->fgdModel), 1, GC_INIT_WITH_MASK);
return (0);
}
int
run_grabcut_iteration2 (struct grabcut_params *GC, IplImage * image_c,
CvMat * mask_c, CvRect * bbox)
{
((Mat *) GC->image)->data = (uchar *) image_c->imageData;
((Mat *) GC->mask)->data = mask_c->data.ptr;
grabCut (*((Mat *) GC->image), *((Mat *) GC->mask), *(bbox),
*((Mat *) GC->bgdModel), *((Mat *) GC->fgdModel), 1, GC_INIT_WITH_RECT);
return (0);
}
int
finalise_grabcut (struct grabcut_params *GC)
{
delete ((Mat *) GC->image);
delete ((Mat *) GC->mask);
delete ((Mat *) GC->bgdModel);
delete ((Mat *) GC->fgdModel);
grabCut (image_c, mask_c, bbox, bgdModel, fgdModel, 1, GC_INIT_WITH_RECT);
return (0);
}
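
With the struct grabcut_params indirection gone, both iteration helpers are direct cv::grabCut calls, and the bgd/fgd models persist as plain cv::Mat members that grabCut refines in place. A self-contained sketch of the two init modes and the test-mode foreground extraction; the input file and bounding box are placeholders, and the bbox must lie inside the image.

/* Standalone sketch of the ported grabcut flow; "in.png" and bbox
 * are placeholders. */
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>

int main ()
{
  cv::Mat img = cv::imread ("in.png");  /* 8-bit, 3 channels, as grabCut expects */
  if (img.empty ())
    return 1;

  cv::Mat mask = cv::Mat::zeros (img.size (), CV_8UC1);
  cv::Mat bgdModel, fgdModel;           /* refined in place across iterations */
  cv::Rect bbox (50, 50, 200, 200);     /* placeholder region of interest */

  if (cv::countNonZero (mask) > 0)
    /* the mask already holds GC_BGD..GC_PR_FGD labels: refine it */
    cv::grabCut (img, mask, cv::Rect (), bgdModel, fgdModel, 1,
        cv::GC_INIT_WITH_MASK);
  else
    /* no usable mask: seed the labels from the bounding box */
    cv::grabCut (img, mask, bbox, bgdModel, fgdModel, 1,
        cv::GC_INIT_WITH_RECT);

  /* GC_FGD and GC_PR_FGD both have bit 0 set, so mask & 1 selects the
   * (probable) foreground, as the element's test mode does */
  cv::Mat fg = (mask & 1) * 255;
  cv::Mat out = cv::Mat::zeros (img.size (), img.type ());
  img.copyTo (out, fg);
  return cv::imwrite ("out.png", out) ? 0 : 1;
}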

View file

@ -49,9 +49,6 @@
#include <gst/video/gstvideofilter.h>
#include <gst/opencv/gstopencvvideofilter.h>
#include <opencv2/core.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/core/types_c.h>
#endif
G_BEGIN_DECLS
@ -69,15 +66,6 @@ G_BEGIN_DECLS
typedef struct _GstGrabcut GstGrabcut;
typedef struct _GstGrabcutClass GstGrabcutClass;
struct grabcut_params
{
void *bgdModel;
void *fgdModel;
void *image;
void *mask;
};
struct _GstGrabcut
{
GstOpencvVideoFilter parent;
@ -85,16 +73,17 @@ struct _GstGrabcut
gboolean test_mode;
gdouble scale; // grow multiplier to apply to input bbox
IplImage *cvRGBin;
IplImage *cvA;
IplImage *cvB;
IplImage *cvC;
IplImage *cvD;
cv::Mat cvRGBin;
cv::Mat cvA;
cv::Mat cvB;
cv::Mat cvC;
cv::Mat cvD;
CvMat *grabcut_mask; // mask created by graphcut
struct grabcut_params GC;
CvRect facepos;
cv::Mat grabcut_mask; // mask created by graphcut
cv::Mat bgdModel;
cv::Mat fgdModel;
cv::Rect facepos;
};
struct _GstGrabcutClass

View file

@ -63,9 +63,6 @@
/* element header */
#include "gsthanddetect.h"
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/imgproc/imgproc_c.h>
#endif
GST_DEBUG_CATEGORY_STATIC (gst_handdetect_debug);
#define GST_CAT_DEFAULT gst_handdetect_debug
@ -115,10 +112,10 @@ static void gst_handdetect_set_property (GObject * object, guint prop_id,
static void gst_handdetect_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static gboolean gst_handdetect_set_caps (GstOpencvVideoFilter * transform,
gint in_width, gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels);
gint in_width, gint in_height, int in_cv_type,
gint out_width, gint out_height, int out_cv_type);
static GstFlowReturn gst_handdetect_transform_ip (GstOpencvVideoFilter *
transform, GstBuffer * buffer, IplImage * img);
transform, GstBuffer * buffer, Mat img);
static CascadeClassifier *gst_handdetect_load_profile (GstHanddetect * filter,
gchar * profile);
@ -165,11 +162,14 @@ gst_handdetect_finalize (GObject * obj)
{
GstHanddetect *filter = GST_HANDDETECT (obj);
if (filter->cvGray)
cvReleaseImage (&filter->cvGray);
filter->cvGray.release ();
g_free (filter->profile_fist);
g_free (filter->profile_palm);
delete (filter->best_r);
if (filter->cvCascade_fist)
delete filter->cvCascade_fist;
if (filter->cvCascade_palm)
delete filter->cvCascade_palm;
G_OBJECT_CLASS (gst_handdetect_parent_class)->finalize (obj);
}
@ -373,8 +373,8 @@ gst_handdetect_get_property (GObject * object, guint prop_id, GValue * value,
/* this function handles the link with other elements */
static gboolean
gst_handdetect_set_caps (GstOpencvVideoFilter * transform,
gint in_width, gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels)
gint in_width, gint in_height, int in_cv_type,
gint out_width, gint out_height, int out_cv_type)
{
GstHanddetect *filter;
filter = GST_HANDDETECT (transform);
@ -384,10 +384,7 @@ gst_handdetect_set_caps (GstOpencvVideoFilter * transform,
GST_WARNING_OBJECT (filter,
"resize to 320 x 240 to have best detect accuracy.\n");
if (filter->cvGray)
cvReleaseImage (&filter->cvGray);
filter->cvGray =
cvCreateImage (cvSize (in_width, in_height), IPL_DEPTH_8U, 1);
filter->cvGray.create (Size (in_width, in_height), CV_8UC1);
return TRUE;
}
@ -397,7 +394,7 @@ gst_handdetect_set_caps (GstOpencvVideoFilter * transform,
*/
static GstFlowReturn
gst_handdetect_transform_ip (GstOpencvVideoFilter * transform,
GstBuffer * buffer, IplImage * img)
GstBuffer * buffer, Mat img)
{
GstHanddetect *filter = GST_HANDDETECT (transform);
Rect *r;
@ -409,29 +406,27 @@ gst_handdetect_transform_ip (GstOpencvVideoFilter * transform,
/* check detection cascades */
if (filter->cvCascade_fist && filter->cvCascade_palm) {
/* cvt to gray colour space for hand detect */
cvCvtColor (img, filter->cvGray, CV_RGB2GRAY);
cvtColor (img, filter->cvGray, COLOR_RGB2GRAY);
/* detect FIST gesture fist */
Mat image = cvarrToMat (filter->cvGray);
Mat roi (image, Rect (filter->cvGray->origin,
filter->cvGray->origin, filter->cvGray->width,
filter->cvGray->height));
Mat roi (filter->cvGray, Rect (0,
0, filter->cvGray.size ().width, filter->cvGray.size ().height));
filter->cvCascade_fist->detectMultiScale (roi, hands, 1.1, 2,
CASCADE_DO_CANNY_PRUNING, cvSize (24, 24), cvSize (0, 0));
CASCADE_DO_CANNY_PRUNING, Size (24, 24), Size (0, 0));
/* if FIST gesture detected */
if (!hands.empty ()) {
int min_distance, distance;
Rect temp_r;
CvPoint c;
Point c;
/* Go through all detected FIST gestures to get the best one
* prev_r => previous hand
* best_r => best hand in this frame
*/
/* set min_distance for init comparison */
min_distance = img->width + img->height;
min_distance = img.size ().width + img.size ().height;
/* Init filter->prev_r */
temp_r = Rect (0, 0, 0, 0);
if (filter->prev_r == NULL)
@ -452,7 +447,7 @@ gst_handdetect_transform_ip (GstOpencvVideoFilter * transform,
/* send msg to app/bus if the detected gesture falls in the region of interest */
/* get center point of gesture */
c = cvPoint (filter->best_r->x + filter->best_r->width / 2,
c = Point (filter->best_r->x + filter->best_r->width / 2,
filter->best_r->y + filter->best_r->height / 2);
/* send message:
* if the center point is in the region of interest, OR,
@ -494,28 +489,23 @@ gst_handdetect_transform_ip (GstOpencvVideoFilter * transform,
/* Check filter->display,
* If TRUE, displaying red circle marker in the out frame */
if (filter->display) {
CvPoint center;
Point center;
int radius;
center.x = cvRound ((filter->best_r->x + filter->best_r->width * 0.5));
center.y = cvRound ((filter->best_r->y + filter->best_r->height * 0.5));
radius =
cvRound ((filter->best_r->width + filter->best_r->height) * 0.25);
cvCircle (img, center, radius, CV_RGB (0, 0, 200), 1, 8, 0);
circle (img, center, radius, CV_RGB (0, 0, 200), 1, 8, 0);
}
} else {
/* if NO FIST gesture, detecting PALM gesture */
#if (CV_MAJOR_VERSION >= 4)
filter->cvCascade_palm->detectMultiScale (roi, hands, 1.1, 2,
CASCADE_DO_CANNY_PRUNING, cvSize (24, 24), cvSize (0, 0));
#else
filter->cvCascade_palm->detectMultiScale (roi, hands, 1.1, 2,
CV_HAAR_DO_CANNY_PRUNING, cvSize (24, 24), cvSize (0, 0));
#endif
CASCADE_DO_CANNY_PRUNING, Size (24, 24), Size (0, 0));
/* if PALM detected */
if (!hands.empty ()) {
int min_distance, distance;
Rect temp_r;
CvPoint c;
Point c;
if (filter->display) {
GST_DEBUG_OBJECT (filter, "%d PALM gestures detected\n",
@ -526,7 +516,7 @@ gst_handdetect_transform_ip (GstOpencvVideoFilter * transform,
* best_r => best hand in this frame
*/
/* suppose a min_distance for init comparison */
min_distance = img->width + img->height;
min_distance = img.size ().width + img.size ().height;
/* Init filter->prev_r */
temp_r = Rect (0, 0, 0, 0);
if (filter->prev_r == NULL)
@ -547,7 +537,7 @@ gst_handdetect_transform_ip (GstOpencvVideoFilter * transform,
/* send msg to app/bus if the detected gesture falls in the region of interest */
/* get center point of gesture */
c = cvPoint (filter->best_r->x + filter->best_r->width / 2,
c = Point (filter->best_r->x + filter->best_r->width / 2,
filter->best_r->y + filter->best_r->height / 2);
/* send message:
* if the center point is in the region of interest, OR,
@ -603,7 +593,7 @@ gst_handdetect_transform_ip (GstOpencvVideoFilter * transform,
/* Check filter->display,
* If TRUE, displaying red circle marker in the out frame */
if (filter->display) {
CvPoint center;
Point center;
int radius;
center.x =
cvRound ((filter->best_r->x + filter->best_r->width * 0.5));
@ -611,7 +601,7 @@ gst_handdetect_transform_ip (GstOpencvVideoFilter * transform,
cvRound ((filter->best_r->y + filter->best_r->height * 0.5));
radius =
cvRound ((filter->best_r->width + filter->best_r->height) * 0.25);
cvCircle (img, center, radius, CV_RGB (0, 0, 200), 1, 8, 0);
circle (img, center, radius, CV_RGB (0, 0, 200), 1, 8, 0);
}
}
}
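
Both cascades share one selection heuristic: keep the detection closest to the previous frame's best hand, after seeding min_distance with width + height, which exceeds any reachable distance. A sketch of that selection as a free function; the Euclidean corner-to-corner metric is an assumption here, since the exact expression sits in an elided hunk.

/* Sketch of the best-hand selection; the distance metric is assumed,
 * not copied from the element. */
#include <opencv2/core.hpp>
#include <cmath>
#include <vector>

static cv::Rect
pick_best_hand (const std::vector<cv::Rect> &hands, const cv::Rect &prev,
    const cv::Size &frame)
{
  if (hands.empty ())
    return prev;
  /* larger than any possible distance inside the frame */
  int min_distance = frame.width + frame.height;
  cv::Rect best = hands[0];
  for (size_t i = 0; i < hands.size (); ++i) {
    int distance = (int) std::sqrt (std::pow (hands[i].x - prev.x, 2.0)
        + std::pow (hands[i].y - prev.y, 2.0));
    if (distance < min_distance) {
      min_distance = distance;
      best = hands[i];
    }
  }
  return best;
}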

View file

@ -81,7 +81,7 @@ struct _GstHanddetect
/* opencv
* cvGray - image to gray colour
*/
IplImage *cvGray;
cv::Mat cvGray;
cv::CascadeClassifier *cvCascade_fist;
cv::CascadeClassifier *cvCascade_palm;
cv::Rect *prev_r;

View file

@ -1,7 +1,7 @@
/*
* GStreamer MotionCells detect areas of motion
* Copyright (C) 2011 Robert Jobbagy <jobbagy.robert@gmail.com>
* Copyright (C) 2011 Nicola Murino <nicola.murino@gmail.com>
* Copyright (C) 2011 - 2018 Nicola Murino <nicola.murino@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -151,7 +151,7 @@ static void gst_motion_cells_get_property (GObject * object, guint prop_id,
static gboolean gst_motion_cells_handle_sink_event (GstPad * pad,
GstObject * parent, GstEvent * event);
static GstFlowReturn gst_motion_cells_transform_ip (GstOpencvVideoFilter *
filter, GstBuffer * buf, IplImage * img);
filter, GstBuffer * buf, cv::Mat img);
static void gst_motioncells_update_motion_cells (GstMotioncells * filter);
static void gst_motioncells_update_motion_masks (GstMotioncells * filter);
@ -384,7 +384,8 @@ gst_motion_cells_init (GstMotioncells * filter)
TRUE);
}
static void fix_coords(motionmaskcoordrect& coords, int width, int height)
static void
fix_coords (motionmaskcoordrect & coords, int width, int height)
{
--width;
--height;
@ -518,7 +519,8 @@ gst_motion_cells_set_property (GObject * object, guint prop_id,
filter->motionmaskcoords[i].lower_right_y = ly < 0 ? 0 : ly;
if (0 < filter->width && 0 < filter->height) {
fix_coords(filter->motionmaskcoords[i], filter->width, filter->height);
fix_coords (filter->motionmaskcoords[i], filter->width,
filter->height);
}
}
} else {
@ -840,12 +842,13 @@ gst_motion_cells_handle_sink_event (GstPad * pad, GstObject * parent,
filter->height = info.height;
if (0 != filter->has_delayed_mask
&& 0 < filter->motionmaskcoord_count && NULL != filter->motionmaskcoords
&& 0 < filter->width && 0 < filter->height)
{
&& 0 < filter->motionmaskcoord_count
&& NULL != filter->motionmaskcoords && 0 < filter->width
&& 0 < filter->height) {
filter->has_delayed_mask = 0;
for (i = 0; i < filter->motionmaskcoord_count; ++i) {
fix_coords(filter->motionmaskcoords[i], filter->width, filter->height);
fix_coords (filter->motionmaskcoords[i], filter->width,
filter->height);
}
}
@ -866,7 +869,7 @@ gst_motion_cells_handle_sink_event (GstPad * pad, GstObject * parent,
*/
static GstFlowReturn
gst_motion_cells_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
IplImage * img)
cv::Mat img)
{
GstMotioncells *filter = gst_motion_cells (base);

View file

@ -1,7 +1,7 @@
/*
* GStreamer
* Copyright (C) 2011 Robert Jobbagy <jobbagy.robert@gmail.com>
* Copyright (C) 2011 Nicola Murino <nicola.murino@gmail.com>
* Copyright (C) 2011 - 2018 Nicola Murino <nicola.murino@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -46,7 +46,6 @@
#define __GST_MOTIONCELLS_H__
#include <gst/opencv/gstopencvvideofilter.h>
#include <opencv2/core.hpp>
#include "motioncells_wrapper.h"
G_BEGIN_DECLS
@ -87,7 +86,6 @@ struct _GstMotioncells
gint64 diff_timestamp, starttime;
guint64 consecutive_motion;
gint width, height;
//time stuff
GTimeVal tv;
double framerate;
//Video width and height are known in "gst_motion_cells_handle_sink_event",

View file

@ -53,7 +53,7 @@
* <refsect2>
* <title>Example launch line</title>
* |[
* gst-launch-1.0 videotestsrc ! decodebin ! videoconvert ! retinex ! videoconvert ! xvimagesink
* gst-launch-1.0 videotestsrc ! videoconvert ! retinex ! videoconvert ! xvimagesink
* ]|
* </refsect2>
*/
@ -64,9 +64,6 @@
#include "gstretinex.h"
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/imgproc/imgproc_c.h>
#endif
GST_DEBUG_CATEGORY_STATIC (gst_retinex_debug);
#define GST_CAT_DEFAULT gst_retinex_debug
@ -128,14 +125,12 @@ static void gst_retinex_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static GstFlowReturn gst_retinex_transform_ip (GstOpencvVideoFilter * filter,
GstBuffer * buff, IplImage * img);
GstBuffer * buff, Mat img);
static gboolean gst_retinex_set_caps (GstOpencvVideoFilter * btrans,
gint in_width, gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels);
gint in_width, gint in_height, int in_cv_type,
gint out_width, gint out_height, int out_cv_type);
static void gst_retinex_release_all_images (GstRetinex * filter);
static gboolean gst_retinex_stop (GstBaseTransform * basesrc);
static void gst_retinex_finalize (GObject * object);
/* initialize the retinex's class */
static void
@ -143,18 +138,16 @@ gst_retinex_class_init (GstRetinexClass * klass)
{
GObjectClass *gobject_class = (GObjectClass *) klass;
GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
GstBaseTransformClass *btrans_class = (GstBaseTransformClass *) klass;
GstOpencvVideoFilterClass *cvbasefilter_class =
(GstOpencvVideoFilterClass *) klass;
gobject_class->finalize = gst_retinex_finalize;
gobject_class->set_property = gst_retinex_set_property;
gobject_class->get_property = gst_retinex_get_property;
cvbasefilter_class->cv_trans_ip_func = gst_retinex_transform_ip;
cvbasefilter_class->cv_set_caps = gst_retinex_set_caps;
btrans_class->stop = gst_retinex_stop;
g_object_class_install_property (gobject_class, PROP_METHOD,
g_param_spec_enum ("method",
"Retinex method to use",
@ -192,6 +185,23 @@ gst_retinex_init (GstRetinex * filter)
TRUE);
}
static void
gst_retinex_finalize (GObject * object)
{
GstRetinex *filter;
filter = GST_RETINEX (object);
filter->cvA.release ();
filter->cvB.release ();
filter->cvC.release ();
filter->cvD.release ();
g_free (filter->weights);
filter->weights = NULL;
g_free (filter->sigmas);
filter->sigmas = NULL;
G_OBJECT_CLASS (gst_retinex_parent_class)->finalize (object);
}
static void
gst_retinex_set_property (GObject * object, guint prop_id,
@ -233,60 +243,31 @@ gst_retinex_get_property (GObject * object, guint prop_id,
static gboolean
gst_retinex_set_caps (GstOpencvVideoFilter * filter, gint in_width,
gint in_height, gint in_depth, gint in_channels, gint out_width,
gint out_height, gint out_depth, gint out_channels)
gint in_height, int in_cv_type, gint out_width,
gint out_height, int out_cv_type)
{
GstRetinex *retinex = GST_RETINEX (filter);
CvSize size;
Size size;
size = cvSize (in_width, in_height);
size = Size (in_width, in_height);
if (retinex->cvA)
gst_retinex_release_all_images (retinex);
retinex->cvA = cvCreateImage (size, IPL_DEPTH_32F, in_channels);
retinex->cvB = cvCreateImage (size, IPL_DEPTH_32F, in_channels);
retinex->cvC = cvCreateImage (size, IPL_DEPTH_32F, in_channels);
retinex->cvD = cvCreateImage (size, IPL_DEPTH_32F, in_channels);
retinex->cvA.create (size, CV_32FC3);
retinex->cvB.create (size, CV_32FC3);
retinex->cvC.create (size, CV_32FC3);
retinex->cvD.create (size, CV_32FC3);
return TRUE;
}
static gboolean
gst_retinex_stop (GstBaseTransform * basesrc)
{
GstRetinex *filter = GST_RETINEX (basesrc);
if (filter->cvA != NULL)
gst_retinex_release_all_images (filter);
g_free (filter->weights);
filter->weights = NULL;
g_free (filter->sigmas);
filter->sigmas = NULL;
return TRUE;
}
static void
gst_retinex_release_all_images (GstRetinex * filter)
{
cvReleaseImage (&filter->cvA);
cvReleaseImage (&filter->cvB);
cvReleaseImage (&filter->cvC);
cvReleaseImage (&filter->cvD);
}
static GstFlowReturn
gst_retinex_transform_ip (GstOpencvVideoFilter * filter, GstBuffer * buf,
IplImage * img)
Mat img)
{
GstRetinex *retinex = GST_RETINEX (filter);
double sigma = 14.0;
int gain = 128;
int offset = 128;
int filter_size;
Mat icvD = cvarrToMat (retinex->cvD, false);
/* Basic retinex restoration. The image and a filtered image are converted
to the log domain and subtracted.
@ -294,22 +275,23 @@ gst_retinex_transform_ip (GstOpencvVideoFilter * filter, GstBuffer * buf,
where O is the output, H is a 2D Gaussian filter and I is the input image. */
if (METHOD_BASIC == retinex->method) {
/* Compute log image */
cvConvert (img, retinex->cvA);
cvLog (retinex->cvA, retinex->cvB);
img.convertTo (retinex->cvA, retinex->cvA.type ());
log (retinex->cvA, retinex->cvB);
/* Compute log of blured image */
filter_size = (int) floor (sigma * 6) / 2;
filter_size = filter_size * 2 + 1;
cvConvert (img, retinex->cvD);
GaussianBlur (icvD, icvD, Size (filter_size, filter_size), 0.0, 0.0);
cvLog (retinex->cvD, retinex->cvC);
img.convertTo (retinex->cvD, retinex->cvD.type ());
GaussianBlur (retinex->cvD, retinex->cvD, Size (filter_size, filter_size),
0.0, 0.0);
log (retinex->cvD, retinex->cvC);
/* Compute difference */
cvSub (retinex->cvB, retinex->cvC, retinex->cvA, NULL);
subtract (retinex->cvB, retinex->cvC, retinex->cvA);
/* Restore */
cvConvertScale (retinex->cvA, img, (float) gain, (float) offset);
retinex->cvA.convertTo (img, img.type (), (float) gain, (float) offset);
}
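
In cv::Mat terms the basic method is O = gain * (Log(I) - Log(H(I))) + offset computed in 32-bit float, and the cvarrToMat shim that used to wrap cvD for GaussianBlur is no longer needed. A standalone sketch of the ported sequence; the input file is a placeholder, and zero-valued pixels hit log(0) here exactly as they do in the element.

/* Standalone sketch of the basic retinex path; "in.png" is a placeholder. */
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <cmath>

int main ()
{
  cv::Mat img = cv::imread ("in.png");
  if (img.empty ())
    return 1;

  double sigma = 14.0;
  int gain = 128, offset = 128;
  cv::Mat A, B, C, D;

  img.convertTo (A, CV_32FC3);
  cv::log (A, B);                        /* Log(I) */

  int filter_size = (int) std::floor (sigma * 6) / 2;
  filter_size = filter_size * 2 + 1;     /* force an odd kernel size */
  img.convertTo (D, CV_32FC3);
  cv::GaussianBlur (D, D, cv::Size (filter_size, filter_size), 0.0, 0.0);
  cv::log (D, C);                        /* Log(H(I)) */

  cv::subtract (B, C, A);                /* Log(I) - Log(H(I)) */
  A.convertTo (img, img.type (), gain, offset);  /* saturating restore */
  return cv::imwrite ("out.png", img) ? 0 : 1;
}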
/* Multiscale retinex restoration. The image and a set of filtered images are
converted to the log domain and subtracted from the original with some set
@ -337,25 +319,26 @@ gst_retinex_transform_ip (GstOpencvVideoFilter * filter, GstBuffer * buf,
}
/* Compute log image */
cvConvert (img, retinex->cvA);
cvLog (retinex->cvA, retinex->cvB);
img.convertTo (retinex->cvA, retinex->cvA.type ());
log (retinex->cvA, retinex->cvB);
/* Filter at each scale */
for (i = 0; i < retinex->scales; i++) {
filter_size = (int) floor (retinex->sigmas[i] * 6) / 2;
filter_size = filter_size * 2 + 1;
cvConvert (img, retinex->cvD);
GaussianBlur (icvD, icvD, Size (filter_size, filter_size), 0.0, 0.0);
cvLog (retinex->cvD, retinex->cvC);
img.convertTo (retinex->cvD, retinex->cvD.type ());
GaussianBlur (retinex->cvD, retinex->cvD, Size (filter_size, filter_size),
0.0, 0.0);
log (retinex->cvD, retinex->cvC);
/* Compute weighted difference */
cvScale (retinex->cvC, retinex->cvC, retinex->weights[i], 0.0);
cvSub (retinex->cvB, retinex->cvC, retinex->cvB, NULL);
retinex->cvC.convertTo (retinex->cvC, -1, retinex->weights[i], 0.0);
subtract (retinex->cvB, retinex->cvC, retinex->cvB);
}
/* Restore */
cvConvertScale (retinex->cvB, img, (float) gain, (float) offset);
retinex->cvB.convertTo (img, img.type (), (float) gain, (float) offset);
}
return GST_FLOW_OK;
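
The multiscale branch accumulates weighted log-differences over several sigmas into cvB before the same restore step. The loop shape as a free function; it assumes weights that roughly sum to one, as the element's defaults do.

/* Sketch of the multiscale loop; img is 8-bit, 3 channels. */
#include <opencv2/imgproc.hpp>
#include <cmath>
#include <vector>

static void
multiscale_retinex (cv::Mat &img, const std::vector<double> &sigmas,
    const std::vector<double> &weights, double gain, double offset)
{
  cv::Mat A, B, C, D;
  img.convertTo (A, CV_32FC3);
  cv::log (A, B);                        /* running result, starts at Log(I) */

  for (size_t i = 0; i < sigmas.size (); i++) {
    int filter_size = (int) std::floor (sigmas[i] * 6) / 2 * 2 + 1;
    img.convertTo (D, CV_32FC3);
    cv::GaussianBlur (D, D, cv::Size (filter_size, filter_size), 0.0, 0.0);
    cv::log (D, C);                      /* Log(H_i(I)) */
    C.convertTo (C, -1, weights[i], 0.0);  /* apply this scale's weight */
    cv::subtract (B, C, B);              /* B -= w_i * Log(H_i(I)) */
  }
  B.convertTo (img, img.type (), gain, offset);
}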

View file

@ -72,10 +72,10 @@ struct _GstRetinex
double *weights;
double *sigmas;
IplImage *cvA;
IplImage *cvB;
IplImage *cvC;
IplImage *cvD;
cv::Mat cvA;
cv::Mat cvB;
cv::Mat cvC;
cv::Mat cvD;
};
struct _GstRetinexClass

View file

@ -92,16 +92,11 @@
#include "gstsegmentation.h"
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/imgproc/imgproc_c.h>
#endif
GST_DEBUG_CATEGORY_STATIC (gst_segmentation_debug);
#define GST_CAT_DEFAULT gst_segmentation_debug
using namespace cv;
using namespace
cv::bgsegm;
/* Filter signals and args */
enum
@ -129,17 +124,12 @@ typedef enum
#define DEFAULT_LEARNING_RATE 0.01
#define GST_TYPE_SEGMENTATION_METHOD (gst_segmentation_method_get_type ())
static
GType
static GType
gst_segmentation_method_get_type (void)
{
static
GType
etype = 0;
static GType etype = 0;
if (etype == 0) {
static const
GEnumValue
values[] = {
static const GEnumValue values[] = {
{METHOD_BOOK, "Codebook-based segmentation (Bradski2008)", "codebook"},
{METHOD_MOG, "Mixture-of-Gaussians segmentation (Bowden2001)", "mog"},
{METHOD_MOG2, "Mixture-of-Gaussians segmentation (Zivkovic2004)", "mog2"},
@ -152,16 +142,12 @@ gst_segmentation_method_get_type (void)
G_DEFINE_TYPE (GstSegmentation, gst_segmentation, GST_TYPE_OPENCV_VIDEO_FILTER);
static
GstStaticPadTemplate
sink_factory = GST_STATIC_PAD_TEMPLATE ("sink",
static GstStaticPadTemplate sink_factory = GST_STATIC_PAD_TEMPLATE ("sink",
GST_PAD_SINK,
GST_PAD_ALWAYS,
GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("RGBA")));
static
GstStaticPadTemplate
src_factory = GST_STATIC_PAD_TEMPLATE ("src",
static GstStaticPadTemplate src_factory = GST_STATIC_PAD_TEMPLATE ("src",
GST_PAD_SRC,
GST_PAD_ALWAYS,
GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("RGBA")));
@ -174,64 +160,42 @@ static void
gst_segmentation_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static
GstFlowReturn
gst_segmentation_transform_ip (GstOpencvVideoFilter * filter,
GstBuffer * buffer, IplImage * img);
static GstFlowReturn gst_segmentation_transform_ip (GstOpencvVideoFilter *
filter, GstBuffer * buffer, Mat img);
static
gboolean
gst_segmentation_stop (GstBaseTransform * basesrc);
static
gboolean
gst_segmentation_set_caps (GstOpencvVideoFilter * filter, gint in_width,
gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels);
static void
gst_segmentation_release_all_pointers (GstSegmentation * filter);
static void gst_segmentation_finalize (GObject * object);
static gboolean gst_segmentation_set_caps (GstOpencvVideoFilter * filter,
gint in_width, gint in_height, int in_cv_type, gint out_width,
gint out_height, int out_cv_type);
/* Codebook algorithm + connected components functions*/
static int
update_codebook (unsigned char *p, codeBook * c,
static int update_codebook (unsigned char *p, codeBook * c,
unsigned *cbBounds, int numChannels);
static int
clear_stale_entries (codeBook * c);
static unsigned char
background_diff (unsigned char *p, codeBook * c,
static int clear_stale_entries (codeBook * c);
static unsigned char background_diff (unsigned char *p, codeBook * c,
int numChannels, int *minMod, int *maxMod);
static void
find_connected_components (IplImage * mask, int poly1_hull0,
float perimScale, CvMemStorage * mem_storage, CvSeq * contours);
static void find_connected_components (Mat mask, int poly1_hull0,
float perimScale);
/* MOG (Mixture-of-Gaussians functions */
static int
initialise_mog (GstSegmentation * filter);
static int
run_mog_iteration (GstSegmentation * filter);
static int
run_mog2_iteration (GstSegmentation * filter);
static int
finalise_mog (GstSegmentation * filter);
static int run_mog_iteration (GstSegmentation * filter);
static int run_mog2_iteration (GstSegmentation * filter);
/* initialize the segmentation's class */
static void
gst_segmentation_class_init (GstSegmentationClass * klass)
{
GObjectClass *
gobject_class;
GstElementClass *
element_class = GST_ELEMENT_CLASS (klass);
GstBaseTransformClass *
basesrc_class = GST_BASE_TRANSFORM_CLASS (klass);
GstOpencvVideoFilterClass *
cvfilter_class = (GstOpencvVideoFilterClass *) klass;
GObjectClass *gobject_class;
GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
GstOpencvVideoFilterClass *cvfilter_class =
(GstOpencvVideoFilterClass *) klass;
gobject_class = (GObjectClass *) klass;
gobject_class->finalize = gst_segmentation_finalize;
gobject_class->set_property = gst_segmentation_set_property;
gobject_class->get_property = gst_segmentation_get_property;
basesrc_class->stop = gst_segmentation_stop;
cvfilter_class->cv_trans_ip_func = gst_segmentation_transform_ip;
cvfilter_class->cv_set_caps = gst_segmentation_set_caps;
@ -284,8 +248,7 @@ static void
gst_segmentation_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec)
{
GstSegmentation *
filter = GST_SEGMENTATION (object);
GstSegmentation *filter = GST_SEGMENTATION (object);
switch (prop_id) {
case PROP_METHOD:
@ -307,8 +270,7 @@ static void
gst_segmentation_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec)
{
GstSegmentation *
filter = GST_SEGMENTATION (object);
GstSegmentation *filter = GST_SEGMENTATION (object);
switch (prop_id) {
case PROP_METHOD:
@ -326,32 +288,26 @@ gst_segmentation_get_property (GObject * object, guint prop_id,
}
}
static
gboolean
static gboolean
gst_segmentation_set_caps (GstOpencvVideoFilter * filter, gint in_width,
gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels)
gint in_height, int in_cv_type,
gint out_width, gint out_height, int out_cv_type)
{
GstSegmentation *
segmentation = GST_SEGMENTATION (filter);
CvSize size;
GstSegmentation *segmentation = GST_SEGMENTATION (filter);
Size size;
size = cvSize (in_width, in_height);
size = Size (in_width, in_height);
segmentation->width = in_width;
segmentation->height = in_height;
if (NULL != segmentation->cvRGB)
gst_segmentation_release_all_pointers (segmentation);
segmentation->cvRGB.create (size, CV_8UC3);
segmentation->cvYUV.create (size, CV_8UC3);
segmentation->cvRGB = cvCreateImage (size, IPL_DEPTH_8U, 3);
segmentation->cvYUV = cvCreateImage (size, IPL_DEPTH_8U, 3);
segmentation->cvFG = Mat::zeros (size, CV_8UC1);
segmentation->cvFG = cvCreateImage (size, IPL_DEPTH_8U, 1);
cvZero (segmentation->cvFG);
segmentation->ch1 = cvCreateImage (size, IPL_DEPTH_8U, 1);
segmentation->ch2 = cvCreateImage (size, IPL_DEPTH_8U, 1);
segmentation->ch3 = cvCreateImage (size, IPL_DEPTH_8U, 1);
segmentation->ch1.create (size, CV_8UC1);
segmentation->ch2.create (size, CV_8UC1);
segmentation->ch3.create (size, CV_8UC1);
/* Codebook method */
segmentation->TcodeBook = (codeBook *)
@ -364,56 +320,43 @@ gst_segmentation_set_caps (GstOpencvVideoFilter * filter, gint in_width,
segmentation->learning_interval = (int) (1.0 / segmentation->learning_rate);
/* Mixture-of-Gaussians (mog) methods */
initialise_mog (segmentation);
segmentation->mog = bgsegm::createBackgroundSubtractorMOG ();
segmentation->mog2 = createBackgroundSubtractorMOG2 ();
return TRUE;
}
/* Clean up */
static
gboolean
gst_segmentation_stop (GstBaseTransform * basesrc)
{
GstSegmentation *
filter = GST_SEGMENTATION (basesrc);
if (filter->cvRGB != NULL)
gst_segmentation_release_all_pointers (filter);
return TRUE;
}
static void
gst_segmentation_release_all_pointers (GstSegmentation * filter)
gst_segmentation_finalize (GObject * object)
{
cvReleaseImage (&filter->cvRGB);
cvReleaseImage (&filter->cvYUV);
cvReleaseImage (&filter->cvFG);
cvReleaseImage (&filter->ch1);
cvReleaseImage (&filter->ch2);
cvReleaseImage (&filter->ch3);
cvReleaseMemStorage (&filter->mem_storage);
GstSegmentation *filter = GST_SEGMENTATION (object);
filter->cvRGB.release ();
filter->cvYUV.release ();
filter->cvFG.release ();
filter->ch1.release ();
filter->ch2.release ();
filter->ch3.release ();
filter->mog.release ();
filter->mog2.release ();
g_free (filter->TcodeBook);
finalise_mog (filter);
G_OBJECT_CLASS (gst_segmentation_parent_class)->finalize (object);
}
static
GstFlowReturn
static GstFlowReturn
gst_segmentation_transform_ip (GstOpencvVideoFilter * cvfilter,
GstBuffer * buffer, IplImage * img)
GstBuffer * buffer, Mat img)
{
GstSegmentation *
filter = GST_SEGMENTATION (cvfilter);
int
j;
GstSegmentation *filter = GST_SEGMENTATION (cvfilter);
int j;
filter->framecount++;
/* Image preprocessing: color space conversion etc */
cvCvtColor (img, filter->cvRGB, CV_RGBA2RGB);
cvCvtColor (filter->cvRGB, filter->cvYUV, CV_RGB2YCrCb);
cvtColor (img, filter->cvRGB, COLOR_RGBA2RGB);
cvtColor (filter->cvRGB, filter->cvYUV, COLOR_RGB2YCrCb);
/* Create and update a fg/bg model using a codebook approach following the
* opencv O'Reilly book [1] implementation of the algo described in [2].
@ -423,24 +366,22 @@ gst_segmentation_transform_ip (GstOpencvVideoFilter * cvfilter,
* [2] "Real-time Foreground-Background Segmentation using Codebook Model",
* Real-time Imaging, Volume 11, Issue 3, Pages 167-256, June 2005. */
if (METHOD_BOOK == filter->method) {
unsigned
cbBounds[3] = { 10, 5, 5 };
int
minMod[3] = { 20, 20, 20 }, maxMod[3] = {
unsigned cbBounds[3] = { 10, 5, 5 };
int minMod[3] = { 20, 20, 20 }, maxMod[3] = {
20, 20, 20
};
if (filter->framecount < 30) {
/* Learning background phase: update_codebook on every frame */
for (j = 0; j < filter->width * filter->height; j++) {
update_codebook ((unsigned char *) filter->cvYUV->imageData + j * 3,
update_codebook (filter->cvYUV.data + j * 3,
(codeBook *) & (filter->TcodeBook[j]), cbBounds, 3);
}
} else {
/* this updating is responsible for FG becoming BG again */
if (filter->framecount % filter->learning_interval == 0) {
for (j = 0; j < filter->width * filter->height; j++) {
update_codebook ((uchar *) filter->cvYUV->imageData + j * 3,
update_codebook (filter->cvYUV.data + j * 3,
(codeBook *) & (filter->TcodeBook[j]), cbBounds, 3);
}
}
@ -451,18 +392,17 @@ gst_segmentation_transform_ip (GstOpencvVideoFilter * cvfilter,
for (j = 0; j < filter->width * filter->height; j++) {
if (background_diff
((uchar *) filter->cvYUV->imageData + j * 3,
(filter->cvYUV.data + j * 3,
(codeBook *) & (filter->TcodeBook[j]), 3, minMod, maxMod)) {
filter->cvFG->imageData[j] = (char) 255;
filter->cvFG.data[j] = (char) 255;
} else {
filter->cvFG->imageData[j] = 0;
filter->cvFG.data[j] = 0;
}
}
}
/* 3rd param is the smallest area to show: (w+h)/param , in pixels */
find_connected_components (filter->cvFG, 1, 10000,
filter->mem_storage, filter->contours);
find_connected_components (filter->cvFG, 1, 10000);
}
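
The codebook port above is mostly mechanical: IplImage::imageData becomes cv::Mat::data with the same j * 3 packed addressing, which is valid because cvYUV is a continuous CV_8UC3 Mat. A deliberately simplified sketch of that access pattern follows, with a single learned min/max box per pixel standing in for the real per-pixel codebook list; PixelBox and both functions are illustrative, not element code.

/* Simplified stand-in for the codebook test: one YUV min/max box per
 * pixel instead of a list of aging code_element entries. Assumes a
 * continuous CV_8UC3 input and a preallocated CV_8UC1 mask. */
#include <opencv2/core.hpp>
#include <vector>

struct PixelBox { unsigned char lo[3], hi[3]; };

/* boxes start inverted so the first learned sample defines the range */
static void
init_model (std::vector<PixelBox> &model)
{
  for (size_t j = 0; j < model.size (); j++)
    for (int n = 0; n < 3; n++) {
      model[j].lo[n] = 255;
      model[j].hi[n] = 0;
    }
}

static void
classify (const cv::Mat &yuv, std::vector<PixelBox> &model, cv::Mat &fg,
    bool learn)
{
  for (int j = 0; j < yuv.rows * yuv.cols; j++) {
    const unsigned char *p = yuv.data + j * 3;  /* packed 3-channel access */
    PixelBox &b = model[j];
    bool inside = true;
    for (int n = 0; n < 3; n++)
      inside = inside && p[n] >= b.lo[n] && p[n] <= b.hi[n];
    if (learn)                   /* widen the box in the learning phase */
      for (int n = 0; n < 3; n++) {
        if (p[n] < b.lo[n]) b.lo[n] = p[n];
        if (p[n] > b.hi[n]) b.hi[n] = p[n];
      }
    fg.data[j] = inside ? 0 : 255;  /* foreground = outside the learned range */
  }
}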
/* Create the foreground and background masks using BackgroundSubtractorMOG [1],
@ -492,15 +432,19 @@ gst_segmentation_transform_ip (GstOpencvVideoFilter * cvfilter,
}
/* in test mode, just overwrite the output */
if (filter->test_mode) {
cvCvtColor (filter->cvFG, filter->cvRGB, CV_GRAY2RGB);
std::vector < cv::Mat > channels (3);
cvSplit (filter->cvRGB, filter->ch1, filter->ch2, filter->ch3, NULL);
if (filter->test_mode) {
cvtColor (filter->cvFG, filter->cvRGB, COLOR_GRAY2RGB);
split (filter->cvRGB, channels);
} else
cvSplit (img, filter->ch1, filter->ch2, filter->ch3, NULL);
split (img, channels);
channels.push_back (filter->cvFG);
/* copy anyhow the fg/bg to the alpha channel in the output image */
cvMerge (filter->ch1, filter->ch2, filter->ch3, filter->cvFG, img);
merge (channels, img);
return GST_FLOW_OK;
@ -543,14 +487,9 @@ update_codebook (unsigned char *p, codeBook * c, unsigned *cbBounds,
int numChannels)
{
/* c->t+=1; */
unsigned int
high[3],
low[3];
int
n,
i;
int
matchChannel;
unsigned int high[3], low[3];
int n, i;
int matchChannel;
for (n = 0; n < numChannels; n++) {
high[n] = p[n] + cbBounds[n];
@ -589,15 +528,13 @@ update_codebook (unsigned char *p, codeBook * c, unsigned *cbBounds,
/* OVERHEAD TO TRACK POTENTIAL STALE ENTRIES */
for (int s = 0; s < c->numEntries; s++) {
/* Track which codebook entries are going stale: */
int
negRun = c->t - c->cb[s]->t_last_update;
int negRun = c->t - c->cb[s]->t_last_update;
if (c->cb[s]->stale < negRun)
c->cb[s]->stale = negRun;
}
/* ENTER A NEW CODEWORD IF NEEDED */
if (i == c->numEntries) { /* if no existing codeword found, make one */
code_element **
foo =
code_element **foo =
(code_element **) g_malloc (sizeof (code_element *) *
(c->numEntries + 1));
for (int ii = 0; ii < c->numEntries; ii++) {
@ -644,18 +581,12 @@ update_codebook (unsigned char *p, codeBook * c, unsigned *cbBounds,
int
clear_stale_entries (codeBook * c)
{
int
staleThresh = c->t >> 1;
int *
keep = (int *) g_malloc (sizeof (int) * (c->numEntries));
int
keepCnt = 0;
code_element **
foo;
int
k;
int
numCleared;
int staleThresh = c->t >> 1;
int *keep = (int *) g_malloc (sizeof (int) * (c->numEntries));
int keepCnt = 0;
code_element **foo;
int k;
int numCleared;
/* SEE WHICH CODEBOOK ENTRIES ARE TOO STALE */
for (int i = 0; i < c->numEntries; i++) {
if (c->cb[i]->stale > staleThresh)
@ -715,11 +646,9 @@ unsigned char
background_diff (unsigned char *p, codeBook * c, int numChannels,
int *minMod, int *maxMod)
{
int
matchChannel;
int matchChannel;
/* SEE IF THIS FITS AN EXISTING CODEWORD */
int
i;
int i;
for (i = 0; i < c->numEntries; i++) {
matchChannel = 0;
for (int n = 0; n < numChannels; n++) {
@ -771,94 +700,53 @@ background_diff (unsigned char *p, codeBook * c, int numChannels,
/* How many iterations of erosion and/or dilation there should be */
#define CVCLOSE_ITR 1
static void
find_connected_components (IplImage * mask, int poly1_hull0, float perimScale,
CvMemStorage * mem_storage, CvSeq * contours)
find_connected_components (Mat mask, int poly1_hull0, float perimScale)
{
CvContourScanner scanner;
CvSeq *
c;
int
numCont = 0;
/* Just some convenience variables */
const
CvScalar
CVX_WHITE = CV_RGB (0xff, 0xff, 0xff);
const
CvScalar
CVX_BLACK = CV_RGB (0x00, 0x00, 0x00);
const Scalar CVX_WHITE = CV_RGB (0xff, 0xff, 0xff);
//const Scalar CVX_BLACK = CV_RGB (0x00, 0x00, 0x00);
int idx = 0;
/* CLEAN UP RAW MASK */
cvMorphologyEx (mask, mask, 0, 0, CV_MOP_OPEN, CVCLOSE_ITR);
cvMorphologyEx (mask, mask, 0, 0, CV_MOP_CLOSE, CVCLOSE_ITR);
morphologyEx (mask, mask, MORPH_OPEN, Mat (), Point (-1, -1), CVCLOSE_ITR);
morphologyEx (mask, mask, MORPH_CLOSE, Mat (), Point (-1, -1), CVCLOSE_ITR);
/* FIND CONTOURS AROUND ONLY BIGGER REGIONS */
if (mem_storage == NULL) {
mem_storage = cvCreateMemStorage (0);
} else {
cvClearMemStorage (mem_storage);
}
scanner = cvStartFindContours (mask, mem_storage, sizeof (CvContour),
CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint (0, 0));
std::vector < std::vector < Point > >contours;
std::vector < std::vector < Point > >to_draw;
std::vector < Vec4i > hierarchy;
findContours (mask, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE,
Point (0, 0));
if (contours.size () == 0)
return;
while ((c = cvFindNextContour (scanner)) != NULL) {
double
len = cvContourArea (c, CV_WHOLE_SEQ, 0);
/* calculate perimeter len threshold: */
double
q = (mask->height + mask->width) / perimScale;
/* Get rid of blob if its perimeter is too small: */
if (len < q) {
cvSubstituteContour (scanner, NULL);
} else {
/* Smooth its edges if its large enough */
CvSeq *
c_new;
for (; idx >= 0; idx = hierarchy[idx][0]) {
const std::vector < Point > &c = contours[idx];
double len = fabs (contourArea (Mat (c)));
double q = (mask.size ().height + mask.size ().width) / perimScale;
if (len >= q) {
std::vector < Point > c_new;
if (poly1_hull0) {
/* Polygonal approximation */
c_new =
cvApproxPoly (c, sizeof (CvContour), mem_storage, CV_POLY_APPROX_DP,
CVCONTOUR_APPROX_LEVEL, 0);
approxPolyDP (c, c_new, CVCONTOUR_APPROX_LEVEL, (hierarchy[idx][2] < 0
&& hierarchy[idx][3] < 0));
} else {
/* Convex Hull of the segmentation */
c_new = cvConvexHull2 (c, mem_storage, CV_CLOCKWISE, 1);
convexHull (c, c_new, true, true);
}
cvSubstituteContour (scanner, c_new);
numCont++;
to_draw.push_back (c_new);
}
}
contours = cvEndFindContours (&scanner);
/* PAINT THE FOUND REGIONS BACK INTO THE IMAGE */
cvZero (mask);
/* DRAW PROCESSED CONTOURS INTO THE MASK */
for (c = contours; c != NULL; c = c->h_next)
cvDrawContours (mask, c, CVX_WHITE, CVX_BLACK, -1, CV_FILLED, 8, cvPoint (0,
0));
mask.setTo (Scalar::all (0));
if (to_draw.size () > 0) {
drawContours (mask, to_draw, -1, CVX_WHITE, FILLED);
}
}
#endif /*ifdef CODE_FROM_OREILLY_BOOK */
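
find_connected_components() now runs entirely on the C++ API: morphologyEx cleans the raw mask, findContours replaces the scanner loop, blobs under (h + w) / perimScale are dropped, and survivors are smoothed with approxPolyDP or convexHull before being redrawn. The same shape as a standalone function; RETR_EXTERNAL and the fixed epsilon are simplifications of the element's RETR_TREE/hierarchy handling.

/* Sketch of the contour clean-up; retrieval mode and epsilon are
 * simplified placeholders. */
#include <opencv2/imgproc.hpp>
#include <cmath>
#include <vector>

static void
clean_mask (cv::Mat &mask, bool poly1_hull0, float perimScale)
{
  cv::morphologyEx (mask, mask, cv::MORPH_OPEN, cv::Mat ());
  cv::morphologyEx (mask, mask, cv::MORPH_CLOSE, cv::Mat ());

  std::vector<std::vector<cv::Point> > contours, to_draw;
  cv::findContours (mask, contours, cv::RETR_EXTERNAL,
      cv::CHAIN_APPROX_SIMPLE);

  double q = (mask.rows + mask.cols) / perimScale;  /* size threshold */
  for (size_t i = 0; i < contours.size (); i++) {
    if (std::fabs (cv::contourArea (contours[i])) < q)
      continue;                                     /* drop small blobs */
    std::vector<cv::Point> c_new;
    if (poly1_hull0)
      cv::approxPolyDP (contours[i], c_new, 2.0, true);  /* smooth edges */
    else
      cv::convexHull (contours[i], c_new);
    to_draw.push_back (c_new);
  }

  mask.setTo (cv::Scalar::all (0));
  if (!to_draw.empty ())
    cv::drawContours (mask, to_draw, -1, cv::Scalar::all (255), cv::FILLED);
}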
int
initialise_mog (GstSegmentation * filter)
{
filter->img_input_as_cvMat = (void *) new
Mat (cvarrToMat (filter->cvYUV, false));
filter->img_fg_as_cvMat = (void *) new Mat (cvarrToMat (filter->cvFG, false));
filter->mog = bgsegm::createBackgroundSubtractorMOG ();
filter->mog2 = createBackgroundSubtractorMOG2 ();
return (0);
}
int
run_mog_iteration (GstSegmentation * filter)
{
((cv::Mat *) filter->img_input_as_cvMat)->data =
(uchar *) filter->cvYUV->imageData;
((cv::Mat *) filter->img_fg_as_cvMat)->data =
(uchar *) filter->cvFG->imageData;
/*
BackgroundSubtractorMOG [1], Gaussian Mixture-based Background/Foreground
Segmentation Algorithm. OpenCV MOG implements the algorithm described in [2].
@ -869,8 +757,7 @@ run_mog_iteration (GstSegmentation * filter)
European Workshop on Advanced Video-Based Surveillance Systems, 2001
*/
filter->mog->apply (*((Mat *) filter->img_input_as_cvMat),
*((Mat *) filter->img_fg_as_cvMat), filter->learning_rate);
filter->mog->apply (filter->cvYUV, filter->cvFG, filter->learning_rate);
return (0);
}
@ -878,10 +765,6 @@ run_mog_iteration (GstSegmentation * filter)
int
run_mog2_iteration (GstSegmentation * filter)
{
((Mat *) filter->img_input_as_cvMat)->data =
(uchar *) filter->cvYUV->imageData;
((Mat *) filter->img_fg_as_cvMat)->data = (uchar *) filter->cvFG->imageData;
/*
BackgroundSubtractorMOG2 [1], Gaussian Mixture-based Background/Foreground
segmentation algorithm. OpenCV MOG2 implements the algorithm described in
@ -895,20 +778,7 @@ run_mog2_iteration (GstSegmentation * filter)
Letters, vol. 27, no. 7, pages 773-780, 2006.
*/
filter->mog2->apply (*((Mat *) filter->img_input_as_cvMat),
*((Mat *) filter->img_fg_as_cvMat), filter->learning_rate);
return (0);
}
int
finalise_mog (GstSegmentation * filter)
{
delete (Mat *) filter->img_input_as_cvMat;
delete (Mat *) filter->img_fg_as_cvMat;
filter->mog.release ();
filter->mog2.release ();
filter->mog2->apply (filter->cvYUV, filter->cvFG, filter->learning_rate);
return (0);
}
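
With the cvarrToMat shims gone, both MOG variants reduce to a single apply() per frame on the Mats the filter already owns: the model update and the foreground mask extraction happen in one call. A standalone sketch; the webcam input and learning rate are placeholders (swap in cv::bgsegm::createBackgroundSubtractorMOG () for the older algorithm).

/* Standalone MOG2 sketch; input source and learning rate are placeholders. */
#include <opencv2/video.hpp>
#include <opencv2/videoio.hpp>

int main ()
{
  cv::VideoCapture cap (0);            /* placeholder input */
  cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 =
      cv::createBackgroundSubtractorMOG2 ();
  cv::Mat frame, fgmask;
  double learning_rate = 0.01;         /* the element's default */

  while (cap.read (frame)) {
    /* one call per frame: background model update + foreground mask */
    mog2->apply (frame, fgmask, learning_rate);
  }
  return 0;
}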

View file

@ -50,9 +50,6 @@
#include <opencv2/video.hpp>
#include <opencv2/core.hpp>
#include <opencv2/bgsegm.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/core/types_c.h>
#endif
G_BEGIN_DECLS
/* #defines don't like whitespacey bits */
@ -96,25 +93,21 @@ struct _GstSegmentation
gboolean test_mode;
gint width, height;
IplImage *cvRGB;
IplImage *cvYUV;
cv::Mat cvRGB;
cv::Mat cvYUV;
IplImage *cvFG; /* used for the alpha BW 1ch image composition */
IplImage *ch1, *ch2, *ch3;
cv::Mat cvFG; /* used for the alpha BW 1ch image composition */
cv::Mat ch1, ch2, ch3;
int framecount;
/* for codebook approach */
codeBook *TcodeBook;
int learning_interval;
CvMemStorage *mem_storage;
CvSeq *contours;
/* for MOG methods */
cv::Ptr<cv::BackgroundSubtractor> mog; /* cv::BackgroundSubtractorMOG */
cv::Ptr<cv::BackgroundSubtractorMOG2> mog2; /* cv::BackgroundSubtractorMOG2 */
void *img_input_as_cvMat; /* cv::Mat */
void *img_fg_as_cvMat; /* cv::Mat */
double learning_rate;
};

View file

@ -60,9 +60,6 @@
#include "gstskindetect.h"
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/imgproc/imgproc_c.h>
#endif
GST_DEBUG_CATEGORY_STATIC (gst_skin_detect_debug);
#define GST_CAT_DEFAULT gst_skin_detect_debug
@ -121,14 +118,13 @@ static void gst_skin_detect_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static GstFlowReturn gst_skin_detect_transform (GstOpencvVideoFilter * filter,
GstBuffer * buf, IplImage * img, GstBuffer * outbuf, IplImage * outimg);
GstBuffer * buf, cv::Mat img, GstBuffer * outbuf, cv::Mat outimg);
static gboolean gst_skin_detect_stop (GstBaseTransform * basesrc);
static void gst_skin_detect_finalize (GObject * object);
static gboolean
gst_skin_detect_set_caps (GstOpencvVideoFilter * transform,
gint in_width, gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels);
static void gst_skin_detect_release_all_images (GstSkinDetect * filter);
gint in_width, gint in_height, int in_cv_type,
gint out_width, gint out_height, int out_cv_type);
/* initialize the skindetect's class */
static void
@ -136,12 +132,12 @@ gst_skin_detect_class_init (GstSkinDetectClass * klass)
{
GObjectClass *gobject_class;
GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
GstBaseTransformClass *basesrc_class = GST_BASE_TRANSFORM_CLASS (klass);
GstOpencvVideoFilterClass *gstopencvbasefilter_class;
gobject_class = (GObjectClass *) klass;
gstopencvbasefilter_class = (GstOpencvVideoFilterClass *) klass;
gobject_class->finalize = GST_DEBUG_FUNCPTR (gst_skin_detect_finalize);
gobject_class->set_property = gst_skin_detect_set_property;
gobject_class->get_property = gst_skin_detect_get_property;
@ -168,7 +164,6 @@ gst_skin_detect_class_init (GstSkinDetectClass * klass)
gst_element_class_add_static_pad_template (element_class, &src_factory);
gst_element_class_add_static_pad_template (element_class, &sink_factory);
basesrc_class->stop = gst_skin_detect_stop;
gstopencvbasefilter_class->cv_set_caps = gst_skin_detect_set_caps;
}
@ -230,144 +225,140 @@ gst_skin_detect_get_property (GObject * object, guint prop_id,
/* this function handles the link with other elements */
static gboolean
gst_skin_detect_set_caps (GstOpencvVideoFilter * transform,
gint in_width, gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels)
gint in_width, gint in_height, int in_cv_type,
gint out_width, gint out_height, int out_cv_type)
{
GstSkinDetect *filter = GST_SKIN_DETECT (transform);
CvSize size = cvSize (in_width, in_height);
cv::Size size = cv::Size (in_width, in_height);
// If cvRGB is already allocated, it means there's a caps modification,
// so release all the images first.
if (NULL != filter->cvRGB)
gst_skin_detect_release_all_images (filter);
filter->cvRGB = cvCreateImageHeader (size, IPL_DEPTH_8U, 3);
filter->cvSkin = cvCreateImageHeader (size, IPL_DEPTH_8U, 3);
filter->cvChA = cvCreateImage (size, IPL_DEPTH_8U, 1);
filter->cvRGB.create (size, CV_8UC3);
filter->cvChA.create (size, CV_8UC1);
filter->width = in_width;
filter->height = in_height;
filter->cvHSV = cvCreateImage (size, IPL_DEPTH_8U, 3);
filter->cvH = cvCreateImage (size, 8, 1); /* Hue component. */
filter->cvH2 = cvCreateImage (size, 8, 1); /* Hue component, 2nd threshold */
filter->cvS = cvCreateImage (size, 8, 1); /* Saturation component. */
filter->cvV = cvCreateImage (size, 8, 1); /* Brightness component. */
filter->cvSkinPixels1 = cvCreateImage (size, 8, 1); /* Greyscale output image */
filter->cvHSV.create (size, CV_8UC3);
filter->cvH.create (size, CV_8UC1); /* Hue component. */
filter->cvH2.create (size, CV_8UC1); /* Hue component, 2nd threshold */
filter->cvS.create (size, CV_8UC1); /* Saturation component. */
filter->cvV.create (size, CV_8UC1); /* Brightness component. */
filter->cvSkinPixels1.create (size, CV_8UC1); /* Greyscale output image */
filter->cvR = cvCreateImage (size, 8, 1); /* R component. */
filter->cvG = cvCreateImage (size, 8, 1); /* G component. */
filter->cvB = cvCreateImage (size, 8, 1); /* B component. */
filter->cvAll = cvCreateImage (size, IPL_DEPTH_32F, 1); /* (R+G+B) component. */
filter->cvR2 = cvCreateImage (size, IPL_DEPTH_32F, 1); /* R component, 32bits */
filter->cvRp = cvCreateImage (size, IPL_DEPTH_32F, 1); /* R' and >0.4 */
filter->cvGp = cvCreateImage (size, IPL_DEPTH_32F, 1); /* G' and > 0.28 */
filter->cvRp2 = cvCreateImage (size, IPL_DEPTH_32F, 1); /* R' <0.6 */
filter->cvGp2 = cvCreateImage (size, IPL_DEPTH_32F, 1); /* G' <0.4 */
filter->cvSkinPixels2 = cvCreateImage (size, IPL_DEPTH_32F, 1); /* Greyscale output image. */
filter->cvdraft = cvCreateImage (size, IPL_DEPTH_8U, 1); /* Greyscale output image. */
filter->cvR.create (size, CV_8UC1); /* R component. */
filter->cvG.create (size, CV_8UC1); /* G component. */
filter->cvB.create (size, CV_8UC1); /* B component. */
filter->cvAll.create (size, CV_32FC1); /* (R+G+B) component. */
filter->cvR2.create (size, CV_32FC1); /* R component, 32bits */
filter->cvRp.create (size, CV_32FC1); /* R' and >0.4 */
filter->cvGp.create (size, CV_32FC1); /* G' and > 0.28 */
filter->cvRp2.create (size, CV_32FC1); /* R' <0.6 */
filter->cvGp2.create (size, CV_32FC1); /* G' <0.4 */
filter->cvSkinPixels2.create (size, CV_32FC1); /* Greyscale output image. */
filter->cvdraft.create (size, CV_8UC1); /* Greyscale output image. */
return TRUE;
}
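
Unlike cvCreateImage, cv::Mat::create only reallocates when the requested size or type differs from the current one, so re-negotiating identical caps is cheap. A small self-contained sketch of that behaviour (the sizes are arbitrary examples):

    #include <opencv2/core.hpp>

    int
    main ()
    {
      cv::Mat m;
      m.create (cv::Size (320, 240), CV_8UC3);  /* allocates */
      uchar *p = m.data;
      m.create (cv::Size (320, 240), CV_8UC3);  /* same size/type: no-op */
      bool unchanged = (p == m.data);           /* true */
      m.create (cv::Size (640, 480), CV_8UC3);  /* new size: reallocates */
      return unchanged ? 0 : 1;
    }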
/* Clean up */
static gboolean
gst_skin_detect_stop (GstBaseTransform * basesrc)
{
GstSkinDetect *filter = GST_SKIN_DETECT (basesrc);
if (filter->cvRGB != NULL)
gst_skin_detect_release_all_images (filter);
return TRUE;
}
static void
gst_skin_detect_release_all_images (GstSkinDetect * filter)
gst_skin_detect_finalize (GObject * object)
{
cvReleaseImage (&filter->cvRGB);
cvReleaseImage (&filter->cvSkin);
cvReleaseImage (&filter->cvChA);
GstSkinDetect *filter = GST_SKIN_DETECT (object);
cvReleaseImage (&filter->cvHSV);
cvReleaseImage (&filter->cvH);
cvReleaseImage (&filter->cvH2);
cvReleaseImage (&filter->cvS);
cvReleaseImage (&filter->cvV);
cvReleaseImage (&filter->cvSkinPixels1);
filter->cvRGB.release ();
filter->cvChA.release ();
filter->cvHSV.release ();
filter->cvH.release ();
filter->cvH2.release ();
filter->cvS.release ();
filter->cvV.release ();
filter->cvSkinPixels1.release ();
filter->cvR.release ();
filter->cvG.release ();
filter->cvB.release ();
filter->cvAll.release ();
filter->cvR2.release ();
filter->cvRp.release ();
filter->cvGp.release ();
filter->cvRp2.release ();
filter->cvGp2.release ();
filter->cvdraft.release ();
filter->cvSkinPixels2.release ();
cvReleaseImage (&filter->cvR);
cvReleaseImage (&filter->cvG);
cvReleaseImage (&filter->cvB);
cvReleaseImage (&filter->cvAll);
cvReleaseImage (&filter->cvR2);
cvReleaseImage (&filter->cvRp);
cvReleaseImage (&filter->cvGp);
cvReleaseImage (&filter->cvRp2);
cvReleaseImage (&filter->cvGp2);
cvReleaseImage (&filter->cvSkinPixels2);
cvReleaseImage (&filter->cvdraft);
G_OBJECT_CLASS (gst_skin_detect_parent_class)->finalize (object);
}
static GstFlowReturn
gst_skin_detect_transform (GstOpencvVideoFilter * base, GstBuffer * buf,
IplImage * img, GstBuffer * outbuf, IplImage * outimg)
cv::Mat img, GstBuffer * outbuf, cv::Mat outimg)
{
GstSkinDetect *filter = GST_SKIN_DETECT (base);
filter->cvRGB->imageData = (char *) img->imageData;
filter->cvSkin->imageData = (char *) outimg->imageData;
std::vector < cv::Mat > channels (3);
filter->cvRGB = cv::Mat (img);
/* SKIN COLOUR BLOB DETECTION */
if (HSV == filter->method) {
cvCvtColor (filter->cvRGB, filter->cvHSV, CV_RGB2HSV);
cvSplit (filter->cvHSV, filter->cvH, filter->cvS, filter->cvV, 0); /* Extract the 3 color components. */
cv::cvtColor (filter->cvRGB, filter->cvHSV, cv::COLOR_RGB2HSV);
cv::split (filter->cvHSV, channels);
filter->cvH = channels.at (0);
filter->cvS = channels.at (1);
filter->cvV = channels.at (2);
/* Detect which pixels in each of the H, S and V channels are probably skin pixels.
Assume that skin has a Hue between 0 and 18 (out of 180), a Saturation above 50 and a Brightness above 80. */
cvThreshold (filter->cvH, filter->cvH2, 10, UCHAR_MAX, CV_THRESH_BINARY); /* (hue > 10) */
cvThreshold (filter->cvH, filter->cvH, 20, UCHAR_MAX, CV_THRESH_BINARY_INV); /* (hue < 20) */
cvThreshold (filter->cvS, filter->cvS, 48, UCHAR_MAX, CV_THRESH_BINARY); /* (sat > 48) */
cvThreshold (filter->cvV, filter->cvV, 80, UCHAR_MAX, CV_THRESH_BINARY); /* (val > 80) */
cv::threshold (filter->cvH, filter->cvH2, 10, UCHAR_MAX, cv::THRESH_BINARY); /* (hue > 10) */
cv::threshold (filter->cvH, filter->cvH, 20, UCHAR_MAX, cv::THRESH_BINARY_INV); /* (hue < 20) */
cv::threshold (filter->cvS, filter->cvS, 48, UCHAR_MAX, cv::THRESH_BINARY); /* (sat > 48) */
cv::threshold (filter->cvV, filter->cvV, 80, UCHAR_MAX, cv::THRESH_BINARY); /* (val > 80) */
/* erode the HUE to get rid of noise. */
cvErode (filter->cvH, filter->cvH, NULL, 1);
cv::erode (filter->cvH, filter->cvH, cv::Mat (), cv::Point (-1, -1), 1);
/* Combine all 3 thresholded color components, so that an output pixel will only
be white (255) if the H, S and V pixels were also white.
imageSkin = (hue > 10) ^ (hue < 20) ^ (sat > 48) ^ (val > 80), where ^ means pixel-wise AND */
cvAnd (filter->cvH, filter->cvS, filter->cvSkinPixels1, NULL);
cvAnd (filter->cvSkinPixels1, filter->cvH2, filter->cvSkinPixels1, NULL);
cvAnd (filter->cvSkinPixels1, filter->cvV, filter->cvSkinPixels1, NULL);
cv::bitwise_and (filter->cvH, filter->cvS, filter->cvSkinPixels1);
cv::bitwise_and (filter->cvSkinPixels1, filter->cvH2,
filter->cvSkinPixels1);
cv::bitwise_and (filter->cvSkinPixels1, filter->cvV, filter->cvSkinPixels1);
cvCvtColor (filter->cvSkinPixels1, filter->cvRGB, CV_GRAY2RGB);
cv::cvtColor (filter->cvSkinPixels1, filter->cvRGB, cv::COLOR_GRAY2RGB);
} else if (RGB == filter->method) {
cvSplit (filter->cvRGB, filter->cvR, filter->cvG, filter->cvB, 0); /* Extract the 3 color components. */
cvAdd (filter->cvR, filter->cvG, filter->cvAll, NULL);
cvAdd (filter->cvB, filter->cvAll, filter->cvAll, NULL); /* All = R + G + B */
cvDiv (filter->cvR, filter->cvAll, filter->cvRp, 1.0); /* R' = R / ( R + G + B) */
cvDiv (filter->cvG, filter->cvAll, filter->cvGp, 1.0); /* G' = G / ( R + G + B) */
cv::split (filter->cvRGB, channels);
filter->cvR = channels.at (0);
filter->cvG = channels.at (1);
filter->cvB = channels.at (2);
cv::add (filter->cvR, filter->cvG, filter->cvAll);
cv::add (filter->cvB, filter->cvAll, filter->cvAll); /* All = R + G + B */
cv::divide (filter->cvR, filter->cvAll, filter->cvRp, 1.0, filter->cvRp.type ()); /* R' = R / ( R + G + B) */
cv::divide (filter->cvG, filter->cvAll, filter->cvGp, 1.0, filter->cvGp.type ()); /* G' = G / ( R + G + B) */
cvConvertScale (filter->cvR, filter->cvR2, 1.0, 0.0);
cvCopy (filter->cvGp, filter->cvGp2, NULL);
cvCopy (filter->cvRp, filter->cvRp2, NULL);
filter->cvR.convertTo (filter->cvR2, filter->cvR2.type (), 1.0, 0.0);
filter->cvGp.copyTo (filter->cvGp2);
filter->cvRp.copyTo (filter->cvRp2);
cvThreshold (filter->cvR2, filter->cvR2, 60, UCHAR_MAX, CV_THRESH_BINARY); /* (R > 60) */
cvThreshold (filter->cvRp, filter->cvRp, 0.42, UCHAR_MAX, CV_THRESH_BINARY); /* (R'> 0.4) */
cvThreshold (filter->cvRp2, filter->cvRp2, 0.6, UCHAR_MAX, CV_THRESH_BINARY_INV); /* (R'< 0.6) */
cvThreshold (filter->cvGp, filter->cvGp, 0.28, UCHAR_MAX, CV_THRESH_BINARY); /* (G'> 0.28) */
cvThreshold (filter->cvGp2, filter->cvGp2, 0.4, UCHAR_MAX, CV_THRESH_BINARY_INV); /* (G'< 0.4) */
cv::threshold (filter->cvR2, filter->cvR2, 60, UCHAR_MAX, cv::THRESH_BINARY); /* (R > 60) */
cv::threshold (filter->cvRp, filter->cvRp, 0.42, UCHAR_MAX, cv::THRESH_BINARY); /* (R'> 0.4) */
cv::threshold (filter->cvRp2, filter->cvRp2, 0.6, UCHAR_MAX, cv::THRESH_BINARY_INV); /* (R'< 0.6) */
cv::threshold (filter->cvGp, filter->cvGp, 0.28, UCHAR_MAX, cv::THRESH_BINARY); /* (G'> 0.28) */
cv::threshold (filter->cvGp2, filter->cvGp2, 0.4, UCHAR_MAX, cv::THRESH_BINARY_INV); /* (G'< 0.4) */
/* Combine the thresholded color components, so that an output pixel will only
be white (255) if all of them were also white. */
cvAnd (filter->cvR2, filter->cvRp, filter->cvSkinPixels2, NULL);
cvAnd (filter->cvRp, filter->cvSkinPixels2, filter->cvSkinPixels2, NULL);
cvAnd (filter->cvRp2, filter->cvSkinPixels2, filter->cvSkinPixels2, NULL);
cvAnd (filter->cvGp, filter->cvSkinPixels2, filter->cvSkinPixels2, NULL);
cvAnd (filter->cvGp2, filter->cvSkinPixels2, filter->cvSkinPixels2, NULL);
cv::bitwise_and (filter->cvR2, filter->cvRp, filter->cvSkinPixels2);
cv::bitwise_and (filter->cvRp, filter->cvSkinPixels2,
filter->cvSkinPixels2);
cv::bitwise_and (filter->cvRp2, filter->cvSkinPixels2,
filter->cvSkinPixels2);
cv::bitwise_and (filter->cvGp, filter->cvSkinPixels2,
filter->cvSkinPixels2);
cv::bitwise_and (filter->cvGp2, filter->cvSkinPixels2,
filter->cvSkinPixels2);
cvConvertScale (filter->cvSkinPixels2, filter->cvdraft, 1.0, 0.0);
cvCvtColor (filter->cvdraft, filter->cvRGB, CV_GRAY2RGB);
filter->cvSkinPixels2.convertTo (filter->cvdraft, filter->cvdraft.type (),
1.0, 0.0);
cv::cvtColor (filter->cvdraft, filter->cvRGB, cv::COLOR_GRAY2RGB);
}
/* After this we have a RGB Black and white image with the skin, in
@ -376,19 +367,20 @@ gst_skin_detect_transform (GstOpencvVideoFilter * base, GstBuffer * buf,
the goal of removing small (spurious) skin spots and creating large
connected areas */
if (filter->postprocess) {
cvSplit (filter->cvRGB, filter->cvChA, NULL, NULL, NULL);
cv::split (filter->cvRGB, channels);
filter->cvChA = channels.at (0);
cvErode (filter->cvChA, filter->cvChA,
cvCreateStructuringElementEx (3, 3, 1, 1, CV_SHAPE_RECT, NULL), 1);
cvDilate (filter->cvChA, filter->cvChA,
cvCreateStructuringElementEx (3, 3, 1, 1, CV_SHAPE_RECT, NULL), 2);
cvErode (filter->cvChA, filter->cvChA,
cvCreateStructuringElementEx (3, 3, 1, 1, CV_SHAPE_RECT, NULL), 1);
cv::Mat element =
cv::getStructuringElement (cv::MORPH_RECT, cv::Size (3, 3),
cv::Point (1, 1));
cv::erode (filter->cvChA, filter->cvChA, element, cv::Point (1, 1), 1);
cv::dilate (filter->cvChA, filter->cvChA, element, cv::Point (1, 1), 2);
cv::erode (filter->cvChA, filter->cvChA, element, cv::Point (1, 1), 1);
cvCvtColor (filter->cvChA, filter->cvRGB, CV_GRAY2RGB);
cv::cvtColor (filter->cvChA, filter->cvRGB, cv::COLOR_GRAY2RGB);
}
cvCopy (filter->cvRGB, filter->cvSkin, NULL);
filter->cvRGB.copyTo (outimg);
return GST_FLOW_OK;
}
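
The per-channel threshold plus bitwise_and chain in the HSV branch above can also be collapsed into a single cv::inRange call; since inRange bounds are inclusive, the bounds below match the element's strict comparisons exactly for 8-bit data (a sketch only, not the element's code):

    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>

    /* hsv: CV_8UC3 image in HSV order; mask: CV_8UC1, 255 where "skin" */
    void
    skin_mask (const cv::Mat & hsv, cv::Mat & mask)
    {
      /* 10 < H < 20, S > 48, V > 80, as in the element's thresholds */
      cv::inRange (hsv, cv::Scalar (11, 49, 81),
          cv::Scalar (19, 255, 255), mask);
    }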

View file

@ -71,10 +71,10 @@ struct _GstSkinDetect
gint method;
gint width, height;
IplImage *cvChA, *cvRGB, *cvSkin;
IplImage *cvHSV, *cvH, *cvH2, *cvS, *cvV, *cvSkinPixels1;
IplImage *cvR, *cvG, *cvB, *cvAll, *cvR2, *cvRp, *cvGp, *cvRp2, *cvGp2,
*cvdraft, *cvSkinPixels2;
cv::Mat cvChA, cvRGB;
cv::Mat cvHSV, cvH, cvH2, cvS, cvV, cvSkinPixels1;
cv::Mat cvR, cvG, cvB, cvAll, cvR2, cvRp, cvGp, cvRp2, cvGp2,
cvdraft, cvSkinPixels2;
};
struct _GstSkinDetectClass

View file

@ -52,7 +52,7 @@
* <refsect2>
* <title>Example launch line</title>
* |[
* gst-launch-1.0 videotestsrc ! decodebin ! videoconvert ! templatematch template=/path/to/file.jpg ! videoconvert ! xvimagesink
* gst-launch-1.0 videotestsrc ! videoconvert ! templatematch template=/path/to/file.jpg ! videoconvert ! xvimagesink
* ]|
* </refsect2>
*/
@ -64,13 +64,7 @@
#include <gst/gst-i18n-plugin.h>
#include "gsttemplatematch.h"
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgcodecs/legacy/constants_c.h>
#else
#include <opencv2/imgcodecs/imgcodecs_c.h>
#endif
GST_DEBUG_CATEGORY_STATIC (gst_template_match_debug);
#define GST_CAT_DEFAULT gst_template_match_debug
@ -116,7 +110,7 @@ static void gst_template_match_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static GstFlowReturn gst_template_match_transform_ip (GstOpencvVideoFilter *
filter, GstBuffer * buf, IplImage * img);
filter, GstBuffer * buf, cv::Mat img);
/* initialize the templatematch's class */
static void
@ -168,8 +162,7 @@ gst_template_match_init (GstTemplateMatch * filter)
{
filter->templ = NULL;
filter->display = TRUE;
filter->cvTemplateImage = NULL;
filter->cvDistImage = NULL;
filter->reload_dist_image = TRUE;
filter->method = DEFAULT_METHOD;
gst_opencv_video_filter_set_in_place (GST_OPENCV_VIDEO_FILTER_CAST (filter),
@ -180,22 +173,12 @@ gst_template_match_init (GstTemplateMatch * filter)
static void
gst_template_match_load_template (GstTemplateMatch * filter, gchar * templ)
{
gchar *oldTemplateFilename = NULL;
IplImage *oldTemplateImage = NULL, *newTemplateImage = NULL, *oldDistImage =
NULL;
cv::Mat newTemplateImage;
if (templ) {
#if (CV_MAJOR_VERSION >= 4)
cv::Mat mat = cv::imread (templ);
newTemplateImage =
cvCreateImage (cvSize (mat.cols, mat.rows), cvIplDepth (mat.flags),
mat.channels ());
IplImage ipltemp = mat;
cvCopy (&ipltemp, newTemplateImage);
#else
newTemplateImage = cvLoadImage (templ, CV_LOAD_IMAGE_COLOR);
#endif
if (!newTemplateImage) {
newTemplateImage = cv::imread (templ);
if (newTemplateImage.empty ()) {
/* Unfortunately OpenCV doesn't seem to provide any way of finding out
why the image load failed, so we can't be more specific than FAILED: */
GST_ELEMENT_WARNING (filter, RESOURCE, FAILED,
@ -207,18 +190,12 @@ gst_template_match_load_template (GstTemplateMatch * filter, gchar * templ)
}
GST_OBJECT_LOCK (filter);
oldTemplateFilename = filter->templ;
g_free (filter->templ);
filter->templ = templ;
oldTemplateImage = filter->cvTemplateImage;
filter->cvTemplateImage = newTemplateImage;
oldDistImage = filter->cvDistImage;
/* This will be recreated in the chain function as required: */
filter->cvDistImage = NULL;
filter->cvTemplateImage = cv::Mat (newTemplateImage);
filter->reload_dist_image = TRUE;
GST_OBJECT_UNLOCK (filter);
cvReleaseImage (&oldDistImage);
cvReleaseImage (&oldTemplateImage);
g_free (oldTemplateFilename);
}
static void
@ -232,22 +209,22 @@ gst_template_match_set_property (GObject * object, guint prop_id,
GST_OBJECT_LOCK (filter);
switch (g_value_get_int (value)) {
case 0:
filter->method = CV_TM_SQDIFF;
filter->method = cv::TM_SQDIFF;
break;
case 1:
filter->method = CV_TM_SQDIFF_NORMED;
filter->method = cv::TM_SQDIFF_NORMED;
break;
case 2:
filter->method = CV_TM_CCORR;
filter->method = cv::TM_CCORR;
break;
case 3:
filter->method = CV_TM_CCORR_NORMED;
filter->method = cv::TM_CCORR_NORMED;
break;
case 4:
filter->method = CV_TM_CCOEFF;
filter->method = cv::TM_CCOEFF;
break;
case 5:
filter->method = CV_TM_CCOEFF_NORMED;
filter->method = cv::TM_CCOEFF_NORMED;
break;
}
GST_OBJECT_UNLOCK (filter);
@ -298,28 +275,24 @@ gst_template_match_finalize (GObject * object)
g_free (filter->templ);
if (filter->cvDistImage) {
cvReleaseImage (&filter->cvDistImage);
}
if (filter->cvTemplateImage) {
cvReleaseImage (&filter->cvTemplateImage);
}
filter->cvDistImage.release ();
filter->cvTemplateImage.release ();
G_OBJECT_CLASS (gst_template_match_parent_class)->finalize (object);
}
static void
gst_template_match_match (IplImage * input, IplImage * templ,
IplImage * dist_image, double *best_res, CvPoint * best_pos, int method)
gst_template_match_match (cv::Mat input, cv::Mat templ,
cv::Mat dist_image, double *best_res, cv::Point * best_pos, int method)
{
double dist_min = 0, dist_max = 0;
CvPoint min_pos, max_pos;
cvMatchTemplate (input, templ, dist_image, method);
cvMinMaxLoc (dist_image, &dist_min, &dist_max, &min_pos, &max_pos, NULL);
if ((CV_TM_SQDIFF_NORMED == method) || (CV_TM_SQDIFF == method)) {
cv::Point min_pos, max_pos;
matchTemplate (input, templ, dist_image, method);
minMaxLoc (dist_image, &dist_min, &dist_max, &min_pos, &max_pos);
if ((cv::TM_SQDIFF_NORMED == method) || (cv::TM_SQDIFF == method)) {
*best_res = dist_min;
*best_pos = min_pos;
if (CV_TM_SQDIFF_NORMED == method) {
if (cv::TM_SQDIFF_NORMED == method) {
*best_res = 1 - *best_res;
}
} else {
@ -333,10 +306,10 @@ gst_template_match_match (IplImage * input, IplImage * templ,
*/
static GstFlowReturn
gst_template_match_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
IplImage * img)
cv::Mat img)
{
GstTemplateMatch *filter;
CvPoint best_pos;
cv::Point best_pos;
double best_res;
GstMessage *m = NULL;
@ -345,27 +318,25 @@ gst_template_match_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
GST_LOG_OBJECT (filter, "Buffer size %u", (guint) gst_buffer_get_size (buf));
GST_OBJECT_LOCK (filter);
if (filter->cvTemplateImage && !filter->cvDistImage) {
if (filter->cvTemplateImage->width > img->width) {
if (!filter->cvTemplateImage.empty () && filter->reload_dist_image) {
if (filter->cvTemplateImage.size ().width > img.size ().width) {
GST_WARNING ("Template Image is wider than input image");
} else if (filter->cvTemplateImage->height > img->height) {
} else if (filter->cvTemplateImage.size ().height > img.size ().height) {
GST_WARNING ("Template Image is taller than input image");
} else {
GST_DEBUG_OBJECT (filter, "cvCreateImage (Size(%d-%d+1,%d) %d, %d)",
img->width, filter->cvTemplateImage->width,
img->height - filter->cvTemplateImage->height + 1, IPL_DEPTH_32F, 1);
filter->cvDistImage =
cvCreateImage (cvSize (img->width -
filter->cvTemplateImage->width + 1,
img->height - filter->cvTemplateImage->height + 1),
IPL_DEPTH_32F, 1);
if (!filter->cvDistImage) {
GST_WARNING ("Couldn't create dist image.");
}
GST_DEBUG_OBJECT (filter, "cv create (Size(%d-%d+1,%d) %d)",
img.size ().width, filter->cvTemplateImage.size ().width,
img.size ().height - filter->cvTemplateImage.size ().height + 1,
CV_32FC1);
filter->cvDistImage.create (cv::Size (img.size ().width -
filter->cvTemplateImage.size ().width + 1,
img.size ().height - filter->cvTemplateImage.size ().height + 1),
CV_32FC1);
filter->reload_dist_image = FALSE;
}
}
if (filter->cvTemplateImage && filter->cvDistImage) {
if (!filter->cvTemplateImage.empty () && !filter->reload_dist_image) {
GstStructure *s;
gst_template_match_match (img, filter->cvTemplateImage,
@ -374,18 +345,18 @@ gst_template_match_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
s = gst_structure_new ("template_match",
"x", G_TYPE_UINT, best_pos.x,
"y", G_TYPE_UINT, best_pos.y,
"width", G_TYPE_UINT, filter->cvTemplateImage->width,
"height", G_TYPE_UINT, filter->cvTemplateImage->height,
"width", G_TYPE_UINT, filter->cvTemplateImage.size ().width,
"height", G_TYPE_UINT, filter->cvTemplateImage.size ().height,
"result", G_TYPE_DOUBLE, best_res, NULL);
m = gst_message_new_element (GST_OBJECT (filter), s);
if (filter->display) {
CvPoint corner = best_pos;
CvScalar color;
if (filter->method == CV_TM_SQDIFF_NORMED
|| filter->method == CV_TM_CCORR_NORMED
|| filter->method == CV_TM_CCOEFF_NORMED) {
cv::Point corner = best_pos;
cv::Scalar color;
if (filter->method == cv::TM_SQDIFF_NORMED
|| filter->method == cv::TM_CCORR_NORMED
|| filter->method == cv::TM_CCOEFF_NORMED) {
/* Yellow growing redder as match certainty approaches 1.0. This can
only be applied with method == *_NORMED as the other match methods
aren't normalized to the range 0.0 - 1.0
@ -396,9 +367,9 @@ gst_template_match_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
buf = gst_buffer_make_writable (buf);
corner.x += filter->cvTemplateImage->width;
corner.y += filter->cvTemplateImage->height;
cvRectangle (img, best_pos, corner, color, 3, 8, 0);
corner.x += filter->cvTemplateImage.size ().width;
corner.y += filter->cvTemplateImage.size ().height;
cv::rectangle (img, best_pos, corner, color, 3, 8, 0);
}
}
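
The min/max handling above follows cv::matchTemplate semantics: TM_SQDIFF and TM_SQDIFF_NORMED score best at the minimum, the other methods at the maximum. A standalone sketch (the file names are placeholders):

    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>
    #include <opencv2/imgcodecs.hpp>

    int
    main ()
    {
      cv::Mat img = cv::imread ("scene.png");       /* placeholder input */
      cv::Mat templ = cv::imread ("template.png");  /* placeholder template */
      if (img.empty () || templ.empty ())
        return 1;
      cv::Mat dist;             /* matchTemplate sizes the result itself */
      cv::matchTemplate (img, templ, dist, cv::TM_SQDIFF_NORMED);
      double dist_min, dist_max;
      cv::Point min_pos, max_pos;
      cv::minMaxLoc (dist, &dist_min, &dist_max, &min_pos, &max_pos);
      /* TM_SQDIFF*: min_pos is the best match; other methods: max_pos */
      cv::rectangle (img, min_pos,
          min_pos + cv::Point (templ.cols, templ.rows),
          cv::Scalar (0, 0, 255), 3);
      return 0;
    }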

View file

@ -72,7 +72,8 @@ struct _GstTemplateMatch
gchar *templ;
IplImage *cvGray, *cvTemplateImage, *cvDistImage;
cv::Mat cvTemplateImage, cvDistImage;
gboolean reload_dist_image;
};
struct _GstTemplateMatchClass

View file

@ -121,7 +121,7 @@ static void gst_opencv_text_overlay_get_property (GObject * object,
guint prop_id, GValue * value, GParamSpec * pspec);
static GstFlowReturn gst_opencv_text_overlay_transform_ip (GstOpencvVideoFilter
* filter, GstBuffer * buf, IplImage * img);
* filter, GstBuffer * buf, cv::Mat img);
/* Clean up */
static void
@ -326,15 +326,14 @@ gst_opencv_text_overlay_get_property (GObject * object, guint prop_id,
*/
static GstFlowReturn
gst_opencv_text_overlay_transform_ip (GstOpencvVideoFilter * base,
GstBuffer * buf, IplImage * img)
GstBuffer * buf, cv::Mat img)
{
GstOpencvTextOverlay *filter = GST_OPENCV_TEXT_OVERLAY (base);
cvInitFont (&(filter->font), CV_FONT_VECTOR0, filter->width, filter->height,
0, filter->thickness, 0);
cvPutText (img, filter->textbuf, cvPoint (filter->xpos,
filter->ypos), &(filter->font), cvScalar (filter->colorR,
filter->colorG, filter->colorB, 0));
cv::putText (img, filter->textbuf, cv::Point (filter->xpos,
filter->ypos), cv::FONT_HERSHEY_SIMPLEX,
(filter->width + filter->height) * 0.5, cv::Scalar (filter->colorR,
filter->colorG, filter->colorB), filter->thickness);
return GST_FLOW_OK;
}
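
cv::putText has no counterpart to CvFont's independent width/height scaling, hence the (width + height) * 0.5 approximation above; where exact placement matters, cv::getTextSize can measure the rendered box first. A small sketch (text, scale and position are assumed values):

    #include <string>
    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>

    void
    overlay_text (cv::Mat & img)
    {
      const std::string text = "sample";    /* assumed text */
      double scale = 1.0;                   /* assumed scale */
      int thickness = 2, baseline = 0;
      cv::Size sz = cv::getTextSize (text, cv::FONT_HERSHEY_SIMPLEX,
          scale, thickness, &baseline);
      /* anchor the baseline so the whole box stays inside the image */
      cv::Point org (10, 10 + sz.height);
      cv::putText (img, text, org, cv::FONT_HERSHEY_SIMPLEX, scale,
          cv::Scalar (255, 255, 255), thickness);
    }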

View file

@ -49,9 +49,6 @@
#include <gst/opencv/gstopencvvideofilter.h>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/imgproc/imgproc_c.h>
#endif
G_BEGIN_DECLS
@ -74,8 +71,6 @@ struct _GstOpencvTextOverlay
{
GstOpencvVideoFilter element;
CvFont font;
gint xpos;
gint ypos;
gint thickness;

View file

@ -1,7 +1,7 @@
/*
* GStreamer
* Copyright (C) 2011 Robert Jobbagy <jobbagy.robert@gmail.com>
* Copyright (C) 2011 Nicola Murino <nicola.murino@gmail.com>
* Copyright (C) 2011 - 2018 Nicola Murino <nicola.murino@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -57,7 +57,7 @@ static int instanceCounter = 0;
static bool element_id_was_max = false;
vector < instanceOfMC > motioncellsvector;
vector < int > motioncellsfreeids;
vector < int >motioncellsfreeids;
MotionCells *mc;
char p_str[] = "idx failed";
@ -82,7 +82,7 @@ motion_cells_init ()
}
int
perform_detection_motion_cells (IplImage * p_image, double p_sensitivity,
perform_detection_motion_cells (cv::Mat p_image, double p_sensitivity,
double p_framerate, int p_gridx, int p_gridy, long int p_timestamp_millisec,
bool p_isVisible, bool p_useAlpha, int motionmaskcoord_count,
motionmaskcoordrect * motionmaskcoords, int motionmaskcells_count,
@ -105,7 +105,7 @@ perform_detection_motion_cells (IplImage * p_image, double p_sensitivity,
void
setPrevFrame (IplImage * p_prevFrame, int p_id)
setPrevFrame (cv::Mat p_prevFrame, int p_id)
{
int idx = 0;
idx = searchIdx (p_id);

View file

@ -1,7 +1,7 @@
/*
* GStreamer
* Copyright (C) 2011 Robert Jobbagy <jobbagy.robert@gmail.com>
* Copyright (C) 2011 Nicola Murino <nicola.murino@gmail.com>
* Copyright (C) 2011 - 2018 Nicola Murino <nicola.murino@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -61,7 +61,7 @@ extern "C"
#endif
int motion_cells_init ();
int perform_detection_motion_cells (IplImage * p_image, double p_sensitivity,
int perform_detection_motion_cells (cv::Mat p_image, double p_sensitivity,
double p_framerate, int p_gridx, int p_gridy,
long int p_timestamp_millisec, bool p_isVisible, bool p_useAlpha,
int motionmaskcoord_count, motionmaskcoordrect * motionmaskcoords,
@ -69,7 +69,7 @@ extern "C"
cellscolor motioncellscolor, int motioncells_count,
motioncellidx * motioncellsidx, gint64 starttime, char *datafile,
bool p_changed_datafile, int p_thickness, int p_id);
void setPrevFrame (IplImage * p_prevFrame, int p_id);
void setPrevFrame (cv::Mat p_prevFrame, int p_id);
void motion_cells_free (int p_id);
void motion_cells_free_resources (int p_id);
char *getMotionCellsIdx (int p_id);

View file

@ -1,5 +1,6 @@
/* GStreamer
* Copyright (C) <2010> Thiago Santos <thiago.sousa.santos@collabora.co.uk>
* Copyright (C) <2018> Nicola Murino <nicola.murino@gmail.com>
*
* gstopencvutils.c: miscellaneous utility functions
*
@ -25,9 +26,6 @@
#include "gstopencvutils.h"
#include <opencv2/core.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/core/types_c.h>
#endif
/*
The various opencv image containers or headers store the following information:
@ -78,9 +76,8 @@ Some have restrictions but if a format is supported then both BGR and RGB
layouts will be supported.
*/
gboolean
gst_opencv_parse_iplimage_params_from_caps (GstCaps * caps, gint * width,
gint * height, gint * ipldepth, gint * channels, GError ** err)
gboolean gst_opencv_parse_cv_mat_params_from_caps
(GstCaps * caps, gint * width, gint * height, int *cv_type, GError ** err)
{
GstVideoInfo info;
gchar *caps_str;
@ -94,28 +91,24 @@ gst_opencv_parse_iplimage_params_from_caps (GstCaps * caps, gint * width,
return FALSE;
}
return gst_opencv_iplimage_params_from_video_info (&info, width, height,
ipldepth, channels, err);
return gst_opencv_cv_mat_params_from_video_info (&info, width, height,
cv_type, err);
}
gboolean
gst_opencv_iplimage_params_from_video_info (GstVideoInfo * info, gint * width,
gint * height, gint * ipldepth, gint * channels, GError ** err)
gboolean gst_opencv_cv_mat_params_from_video_info
(GstVideoInfo * info, gint * width, gint * height, int *cv_type,
GError ** err)
{
GstVideoFormat format;
int cv_type;
format = GST_VIDEO_INFO_FORMAT (info);
if (!gst_opencv_cv_image_type_from_video_format (format, &cv_type, err)) {
if (!gst_opencv_cv_image_type_from_video_format (format, cv_type, err)) {
return FALSE;
}
*width = GST_VIDEO_INFO_WIDTH (info);
*height = GST_VIDEO_INFO_HEIGHT (info);
*ipldepth = cvIplDepth (cv_type);
*channels = CV_MAT_CN (cv_type);
return TRUE;
}
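
A hedged usage sketch of the renamed helper: the negotiated caps yield width, height and an OpenCV type id, which is enough to wrap a mapped GStreamer buffer without copying (caps and map are assumed to exist in the caller):

    gint width, height;
    int cv_type;
    GError *err = NULL;

    /* caps: negotiated GstCaps; map: GstMapInfo over the frame buffer */
    if (gst_opencv_parse_cv_mat_params_from_caps (caps, &width, &height,
            &cv_type, &err)) {
      cv::Mat frame (cv::Size (width, height), cv_type, map.data);
      /* frame aliases the GStreamer memory; no pixel copy is made */
    } else {
      g_error_free (err);
    }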

View file

@ -1,5 +1,6 @@
/* GStreamer
* Copyright (C) <2010> Thiago Santos <thiago.sousa.santos@collabora.co.uk>
* Copyright (C) <2018> Nicola Murino <nicola.murino@gmail.com>
*
* gstopencvutils.h: miscellaneous utility functions
*
@ -33,14 +34,14 @@
G_BEGIN_DECLS
GST_OPENCV_API
gboolean gst_opencv_parse_iplimage_params_from_caps
(GstCaps * caps, gint * width, gint * height, gint * depth,
gint * channels, GError ** err);
gboolean gst_opencv_parse_cv_mat_params_from_caps
(GstCaps * caps, gint * width, gint * height, int * cv_type,
GError ** err);
GST_OPENCV_API
gboolean gst_opencv_iplimage_params_from_video_info
(GstVideoInfo * info, gint * width, gint * height, gint * depth,
gint * channels, GError ** err);
gboolean gst_opencv_cv_mat_params_from_video_info
(GstVideoInfo * info, gint * width, gint * height, int *cv_type,
GError ** err);
GST_OPENCV_API
gboolean gst_opencv_cv_image_type_from_video_format (GstVideoFormat format,

View file

@ -1,6 +1,7 @@
/*
* GStreamer
* Copyright (C) 2010 Thiago Santos <thiago.sousa.santos@collabora.co.uk>
* Copyright (C) 2018 Nicola Murino <nicola.murino@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -49,11 +50,7 @@
#include "gstopencvvideofilter.h"
#include "gstopencvutils.h"
#include <opencv2/core.hpp>
#if (CV_MAJOR_VERSION >= 4)
#include <opencv2/core/core_c.h>
#endif
GST_DEBUG_CATEGORY_STATIC (gst_opencv_video_filter_debug);
#define GST_CAT_DEFAULT gst_opencv_video_filter_debug
@ -94,10 +91,8 @@ gst_opencv_video_filter_finalize (GObject * obj)
{
GstOpencvVideoFilter *transform = GST_OPENCV_VIDEO_FILTER (obj);
if (transform->cvImage)
cvReleaseImage (&transform->cvImage);
if (transform->out_cvImage)
cvReleaseImage (&transform->out_cvImage);
transform->cvImage.release ();
transform->out_cvImage.release ();
G_OBJECT_CLASS (parent_class)->finalize (obj);
}
@ -142,17 +137,11 @@ gst_opencv_video_filter_transform_frame (GstVideoFilter * trans,
fclass = GST_OPENCV_VIDEO_FILTER_GET_CLASS (transform);
g_return_val_if_fail (fclass->cv_trans_func != NULL, GST_FLOW_ERROR);
g_return_val_if_fail (transform->cvImage != NULL, GST_FLOW_ERROR);
g_return_val_if_fail (transform->out_cvImage != NULL, GST_FLOW_ERROR);
transform->cvImage->imageData = (char *) inframe->data[0];
transform->cvImage->imageSize = inframe->info.size;
transform->cvImage->widthStep = inframe->info.stride[0];
transform->out_cvImage->imageData = (char *) outframe->data[0];
transform->out_cvImage->imageSize = outframe->info.size;
transform->out_cvImage->widthStep = outframe->info.stride[0];
transform->cvImage.data = (unsigned char *) inframe->data[0];
transform->cvImage.datastart = (unsigned char *) inframe->data[0];
transform->out_cvImage.data = (unsigned char *) outframe->data[0];
transform->out_cvImage.datastart = (unsigned char *) outframe->data[0];
ret = fclass->cv_trans_func (transform, inframe->buffer, transform->cvImage,
outframe->buffer, transform->out_cvImage);
@ -171,11 +160,9 @@ gst_opencv_video_filter_transform_frame_ip (GstVideoFilter * trans,
fclass = GST_OPENCV_VIDEO_FILTER_GET_CLASS (transform);
g_return_val_if_fail (fclass->cv_trans_ip_func != NULL, GST_FLOW_ERROR);
g_return_val_if_fail (transform->cvImage != NULL, GST_FLOW_ERROR);
transform->cvImage->imageData = (char *) frame->data[0];
transform->cvImage->imageSize = frame->info.size;
transform->cvImage->widthStep = frame->info.stride[0];
transform->cvImage.data = (unsigned char *) frame->data[0];
transform->cvImage.datastart = (unsigned char *) frame->data[0];
ret = fclass->cv_trans_ip_func (transform, frame->buffer, transform->cvImage);
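
Re-pointing cvImage.data keeps the transform zero-copy, mirroring the old imageData assignment; an equivalent construction builds a fresh cv::Mat header around the frame data each time, which also lets the stride be passed explicitly (width, height and cv_type assumed from set_info below):

    /* width, height and cv_type as derived in set_info */
    cv::Mat in_mat (cv::Size (width, height), cv_type,
        (void *) inframe->data[0], inframe->info.stride[0]);
    /* in_mat is only a header over the frame; it neither owns nor copies
     * the pixels, and the row stride is carried along explicitly */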
@ -190,22 +177,22 @@ gst_opencv_video_filter_set_info (GstVideoFilter * trans, GstCaps * incaps,
GstOpencvVideoFilterClass *klass =
GST_OPENCV_VIDEO_FILTER_GET_CLASS (transform);
gint in_width, in_height;
gint in_depth, in_channels;
int in_cv_type;
gint out_width, out_height;
gint out_depth, out_channels;
int out_cv_type;
GError *in_err = NULL;
GError *out_err = NULL;
if (!gst_opencv_iplimage_params_from_video_info (in_info, &in_width,
&in_height, &in_depth, &in_channels, &in_err)) {
if (!gst_opencv_cv_mat_params_from_video_info (in_info, &in_width,
&in_height, &in_cv_type, &in_err)) {
GST_WARNING_OBJECT (transform, "Failed to parse input caps: %s",
in_err->message);
g_error_free (in_err);
return FALSE;
}
if (!gst_opencv_iplimage_params_from_video_info (out_info, &out_width,
&out_height, &out_depth, &out_channels, &out_err)) {
if (!gst_opencv_cv_mat_params_from_video_info (out_info, &out_width,
&out_height, &out_cv_type, &out_err)) {
GST_WARNING_OBJECT (transform, "Failed to parse output caps: %s",
out_err->message);
g_error_free (out_err);
@ -213,23 +200,13 @@ gst_opencv_video_filter_set_info (GstVideoFilter * trans, GstCaps * incaps,
}
if (klass->cv_set_caps) {
if (!klass->cv_set_caps (transform, in_width, in_height, in_depth,
in_channels, out_width, out_height, out_depth, out_channels))
if (!klass->cv_set_caps (transform, in_width, in_height, in_cv_type,
out_width, out_height, out_cv_type))
return FALSE;
}
if (transform->cvImage) {
cvReleaseImage (&transform->cvImage);
}
if (transform->out_cvImage) {
cvReleaseImage (&transform->out_cvImage);
}
transform->cvImage =
cvCreateImageHeader (cvSize (in_width, in_height), in_depth, in_channels);
transform->out_cvImage =
cvCreateImageHeader (cvSize (out_width, out_height), out_depth,
out_channels);
transform->cvImage.create (cv::Size (in_width, in_height), in_cv_type);
transform->out_cvImage.create (cv::Size (out_width, out_height), out_cv_type);
gst_base_transform_set_in_place (GST_BASE_TRANSFORM (transform),
transform->in_place);

View file

@ -1,6 +1,7 @@
/*
* GStreamer
* Copyright (C) 2010 Thiago Santos <thiago.sousa.santos@collabora.co.uk>
* Copyright (C) 2018 Nicola Murino <nicola.murino@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -47,11 +48,12 @@
#include <gst/gst.h>
#include <gst/video/gstvideofilter.h>
#include <gst/opencv/opencv-prelude.h>
#include <opencv2/core.hpp>
G_BEGIN_DECLS
/* forward declare opencv type to avoid exposing them in this API */
typedef struct _IplImage IplImage;
//typedef struct _IplImage IplImage;
/* #defines don't like whitespacey bits */
#define GST_TYPE_OPENCV_VIDEO_FILTER \
@ -72,15 +74,15 @@ typedef struct _GstOpencvVideoFilter GstOpencvVideoFilter;
typedef struct _GstOpencvVideoFilterClass GstOpencvVideoFilterClass;
typedef GstFlowReturn (*GstOpencvVideoFilterTransformIPFunc)
(GstOpencvVideoFilter * transform, GstBuffer * buffer, IplImage * img);
(GstOpencvVideoFilter * transform, GstBuffer * buffer, cv::Mat img);
typedef GstFlowReturn (*GstOpencvVideoFilterTransformFunc)
(GstOpencvVideoFilter * transform, GstBuffer * buffer, IplImage * img,
GstBuffer * outbuf, IplImage * outimg);
(GstOpencvVideoFilter * transform, GstBuffer * buffer, cv::Mat img,
GstBuffer * outbuf, cv::Mat outimg);
typedef gboolean (*GstOpencvVideoFilterSetCaps)
(GstOpencvVideoFilter * transform, gint in_width, gint in_height,
gint in_depth, gint in_channels, gint out_width, gint out_height,
gint out_depth, gint out_channels);
int in_cv_type, gint out_width, gint out_height,
int out_cv_type);
struct _GstOpencvVideoFilter
{
@ -88,8 +90,8 @@ struct _GstOpencvVideoFilter
gboolean in_place;
IplImage *cvImage;
IplImage *out_cvImage;
cv::Mat cvImage;
cv::Mat out_cvImage;
};
struct _GstOpencvVideoFilterClass