opencv: fix indentation

Nicola Murino 2018-11-25 16:12:40 +01:00 committed by Nicolas Dufresne
parent 2fd3130350
commit 890dbb560f
16 changed files with 463 additions and 337 deletions
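This is a pure reformatting commit: the OpenCV plugin sources are re-run through GStreamer's gst-indent (GNU indent) conventions, so each hunk below pairs an old line with its reindented replacement and no behavior changes. The visible rules are a space before the argument list of calls and declarations, two-space block indent with four-space continuation lines, and wrapping at roughly 80 columns. A minimal before/after sketch, mirroring the findChessboardCorners hunk further down (illustrative, not itself a hunk from the diff):

  /* before: no space ahead of the argument list, long line unwrapped */
  found = cv::findChessboardCorners(view, calib->boardSize, pointBuf, chessBoardFlags);

  /* after gst-indent: space before '(', assignment split, continuation
   * indented four spaces */
  found =
      cv::findChessboardCorners (view, calib->boardSize, pointBuf,
      chessBoardFlags);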

View file

@ -198,7 +198,7 @@ MotionCells::performDetectionMotionCells (IplImage * p_frame,
delete[]m_motioncellsidxcstr;
m_motioncells_idx_count = m_MotionCells.size () * MSGLEN; //one motion cell idx: (lin idx : col idx,) it's up to 6 character except last motion cell idx
m_motioncellsidxcstr = new char[m_motioncells_idx_count];
char *tmpstr = new char[MSGLEN+ 1];
char *tmpstr = new char[MSGLEN + 1];
tmpstr[0] = 0;
for (unsigned int i = 0; i < m_MotionCells.size (); i++) {
CvPoint pt1, pt2;
@ -223,10 +223,10 @@ MotionCells::performDetectionMotionCells (IplImage * p_frame,
}
if (i < m_MotionCells.size () - 1) {
snprintf (tmpstr, MSGLEN+1, "%d:%d,", m_MotionCells.at (i).lineidx,
snprintf (tmpstr, MSGLEN + 1, "%d:%d,", m_MotionCells.at (i).lineidx,
m_MotionCells.at (i).colidx);
} else {
snprintf (tmpstr, MSGLEN+1, "%d:%d", m_MotionCells.at (i).lineidx,
snprintf (tmpstr, MSGLEN + 1, "%d:%d", m_MotionCells.at (i).lineidx,
m_MotionCells.at (i).colidx);
}
if (i == 0)

View file

@ -22,20 +22,22 @@
#include <opencv2/opencv.hpp>
gchar *
camera_serialize_undistort_settings (cv::Mat &cameraMatrix, cv::Mat &distCoeffs)
camera_serialize_undistort_settings (cv::Mat & cameraMatrix,
cv::Mat & distCoeffs)
{
cv::FileStorage fs(".xml", cv::FileStorage::WRITE + cv::FileStorage::MEMORY);
cv::FileStorage fs (".xml", cv::FileStorage::WRITE + cv::FileStorage::MEMORY);
fs << "cameraMatrix" << cameraMatrix;
fs << "distCoeffs" << distCoeffs;
std::string buf = fs.releaseAndGetString();
std::string buf = fs.releaseAndGetString ();
return g_strdup(buf.c_str());
return g_strdup (buf.c_str ());
}
gboolean
camera_deserialize_undistort_settings (gchar * str, cv::Mat &cameraMatrix, cv::Mat &distCoeffs)
camera_deserialize_undistort_settings (gchar * str, cv::Mat & cameraMatrix,
cv::Mat & distCoeffs)
{
cv::FileStorage fs(str, cv::FileStorage::READ + cv::FileStorage::MEMORY);
cv::FileStorage fs (str, cv::FileStorage::READ + cv::FileStorage::MEMORY);
fs["cameraMatrix"] >> cameraMatrix;
fs["distCoeffs"] >> distCoeffs;

View file

@ -128,10 +128,11 @@ enum
PROP_SETTINGS
};
enum {
DETECTION = 0,
CAPTURING = 1,
CALIBRATED = 2
enum
{
DETECTION = 0,
CAPTURING = 1,
CALIBRATED = 2
};
#define GST_TYPE_CAMERA_CALIBRATION_PATTERN (camera_calibration_pattern_get_type ())
@ -142,19 +143,23 @@ camera_calibration_pattern_get_type (void)
static GType camera_calibration_pattern_type = 0;
static const GEnumValue camera_calibration_pattern[] = {
{GST_CAMERA_CALIBRATION_PATTERN_CHESSBOARD, "Chessboard", "chessboard"},
{GST_CAMERA_CALIBRATION_PATTERN_CIRCLES_GRID, "Circle Grids", "circle_grids"},
{GST_CAMERA_CALIBRATION_PATTERN_ASYMMETRIC_CIRCLES_GRID, "Asymmetric Circle Grids", "asymmetric_circle_grids"},
{GST_CAMERA_CALIBRATION_PATTERN_CIRCLES_GRID, "Circle Grids",
"circle_grids"},
{GST_CAMERA_CALIBRATION_PATTERN_ASYMMETRIC_CIRCLES_GRID,
"Asymmetric Circle Grids", "asymmetric_circle_grids"},
{0, NULL, NULL},
};
if (!camera_calibration_pattern_type) {
camera_calibration_pattern_type =
g_enum_register_static ("GstCameraCalibrationPattern", camera_calibration_pattern);
g_enum_register_static ("GstCameraCalibrationPattern",
camera_calibration_pattern);
}
return camera_calibration_pattern_type;
}
G_DEFINE_TYPE (GstCameraCalibrate, gst_camera_calibrate, GST_TYPE_OPENCV_VIDEO_FILTER);
G_DEFINE_TYPE (GstCameraCalibrate, gst_camera_calibrate,
GST_TYPE_OPENCV_VIDEO_FILTER);
static void gst_camera_calibrate_dispose (GObject * object);
static void gst_camera_calibrate_set_property (GObject * object, guint prop_id,
@ -162,8 +167,9 @@ static void gst_camera_calibrate_set_property (GObject * object, guint prop_id,
static void gst_camera_calibrate_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static GstFlowReturn gst_camera_calibrate_transform_frame_ip (
GstOpencvVideoFilter * cvfilter, GstBuffer * frame, IplImage * img);
static GstFlowReturn
gst_camera_calibrate_transform_frame_ip (GstOpencvVideoFilter * cvfilter,
GstBuffer * frame, IplImage * img);
/* clean up */
static void
@ -178,7 +184,8 @@ gst_camera_calibrate_class_init (GstCameraCalibrateClass * klass)
{
GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
GstOpencvVideoFilterClass *opencvfilter_class = GST_OPENCV_VIDEO_FILTER_CLASS (klass);
GstOpencvVideoFilterClass *opencvfilter_class =
GST_OPENCV_VIDEO_FILTER_CLASS (klass);
GstCaps *caps;
GstPadTemplate *templ;
@ -223,22 +230,26 @@ gst_camera_calibrate_class_init (GstCameraCalibrateClass * klass)
g_object_class_install_property (gobject_class, PROP_CORNER_SUB_PIXEL,
g_param_spec_boolean ("corner-sub-pixel", "Corner Sub Pixel",
"Improve corner detection accuracy for chessboard",
DEFAULT_CORNER_SUB_PIXEL, (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
DEFAULT_CORNER_SUB_PIXEL,
(GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property (gobject_class, PROP_ZERO_TANGENT_DISTORTION,
g_param_spec_boolean ("zero-tangent-distorsion", "Zero Tangent Distorsion",
"Assume zero tangential distortion",
DEFAULT_ZERO_TANGENT_DISTORTION, (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_param_spec_boolean ("zero-tangent-distorsion",
"Zero Tangent Distorsion", "Assume zero tangential distortion",
DEFAULT_ZERO_TANGENT_DISTORTION,
(GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property (gobject_class, PROP_CENTER_PRINCIPAL_POINT,
g_param_spec_boolean ("center-principal-point", "Center Principal Point",
"Fix the principal point at the center",
DEFAULT_CENTER_PRINCIPAL_POINT, (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
DEFAULT_CENTER_PRINCIPAL_POINT,
(GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property (gobject_class, PROP_USE_FISHEYE,
g_param_spec_boolean ("use-fisheye", "Use Fisheye",
"Use fisheye camera model for calibration",
DEFAULT_USE_FISHEYE, (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
DEFAULT_USE_FISHEYE,
(GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property (gobject_class, PROP_DELAY,
g_param_spec_int ("delay", "Delay",
@ -248,14 +259,15 @@ gst_camera_calibrate_class_init (GstCameraCalibrateClass * klass)
g_object_class_install_property (gobject_class, PROP_FRAME_COUNT,
g_param_spec_int ("frame-count", "Frame Count",
"The number of frames to use from the input for calibration", 1, G_MAXINT,
DEFAULT_FRAME_COUNT,
"The number of frames to use from the input for calibration", 1,
G_MAXINT, DEFAULT_FRAME_COUNT,
(GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property (gobject_class, PROP_SHOW_CORNERS,
g_param_spec_boolean ("show-corners", "Show Corners",
"Show corners",
DEFAULT_SHOW_CORNERS, (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
DEFAULT_SHOW_CORNERS,
(GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property (gobject_class, PROP_SETTINGS,
g_param_spec_string ("settings", "Settings",
@ -289,7 +301,7 @@ gst_camera_calibrate_init (GstCameraCalibrate * calib)
calib->boardSize.width = DEFAULT_BOARD_WIDTH;
calib->boardSize.height = DEFAULT_BOARD_HEIGHT;
calib->squareSize = DEFAULT_SQUARE_SIZE;
calib->aspectRatio = DEFAULT_ASPECT_RATIO;
calib->aspectRatio = DEFAULT_ASPECT_RATIO;
calib->cornerSubPix = DEFAULT_CORNER_SUB_PIXEL;
calib->calibZeroTangentDist = DEFAULT_ZERO_TANGENT_DISTORTION;
calib->calibFixPrincipalPoint = DEFAULT_CENTER_PRINCIPAL_POINT;
@ -299,28 +311,33 @@ gst_camera_calibrate_init (GstCameraCalibrate * calib)
calib->showCorners = DEFAULT_SHOW_CORNERS;
calib->flags = cv::CALIB_FIX_K4 | cv::CALIB_FIX_K5;
if (calib->calibFixPrincipalPoint) calib->flags |= cv::CALIB_FIX_PRINCIPAL_POINT;
if (calib->calibZeroTangentDist) calib->flags |= cv::CALIB_ZERO_TANGENT_DIST;
if (calib->aspectRatio) calib->flags |= cv::CALIB_FIX_ASPECT_RATIO;
if (calib->calibFixPrincipalPoint)
calib->flags |= cv::CALIB_FIX_PRINCIPAL_POINT;
if (calib->calibZeroTangentDist)
calib->flags |= cv::CALIB_ZERO_TANGENT_DIST;
if (calib->aspectRatio)
calib->flags |= cv::CALIB_FIX_ASPECT_RATIO;
if (calib->useFisheye) {
/* the fisheye model has its own enum, so overwrite the flags */
calib->flags = cv::fisheye::CALIB_FIX_SKEW | cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC |
/*cv::fisheye::CALIB_FIX_K1 |*/
cv::fisheye::CALIB_FIX_K2 | cv::fisheye::CALIB_FIX_K3 | cv::fisheye::CALIB_FIX_K4;
/* the fisheye model has its own enum, so overwrite the flags */
calib->flags =
cv::fisheye::CALIB_FIX_SKEW | cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC |
/*cv::fisheye::CALIB_FIX_K1 | */
cv::fisheye::CALIB_FIX_K2 | cv::fisheye::CALIB_FIX_K3 | cv::
fisheye::CALIB_FIX_K4;
}
calib->mode = CAPTURING; //DETECTION;
calib->mode = CAPTURING; //DETECTION;
calib->prevTimestamp = 0;
calib->imagePoints.clear();
calib->imagePoints.clear ();
calib->cameraMatrix = 0;
calib->distCoeffs = 0;
calib->settings = NULL;
gst_opencv_video_filter_set_in_place (
GST_OPENCV_VIDEO_FILTER_CAST (calib), TRUE);
gst_opencv_video_filter_set_in_place (GST_OPENCV_VIDEO_FILTER_CAST (calib),
TRUE);
}
static void
@ -435,7 +452,7 @@ gst_camera_calibrate_get_property (GObject * object, guint prop_id,
}
}
void camera_calibrate_run(GstCameraCalibrate *calib, IplImage *img);
void camera_calibrate_run (GstCameraCalibrate * calib, IplImage * img);
/*
* Performs the camera calibration
@ -446,23 +463,24 @@ gst_camera_calibrate_transform_frame_ip (GstOpencvVideoFilter * cvfilter,
{
GstCameraCalibrate *calib = GST_CAMERA_CALIBRATE (cvfilter);
camera_calibrate_run(calib, img);
camera_calibrate_run (calib, img);
return GST_FLOW_OK;
}
bool camera_calibrate_calibrate(GstCameraCalibrate *calib,
cv::Size imageSize, cv::Mat& cameraMatrix, cv::Mat& distCoeffs,
std::vector<std::vector<cv::Point2f> > imagePoints );
bool camera_calibrate_calibrate (GstCameraCalibrate * calib,
cv::Size imageSize, cv::Mat & cameraMatrix, cv::Mat & distCoeffs,
std::vector < std::vector < cv::Point2f > >imagePoints);
void camera_calibrate_run(GstCameraCalibrate *calib, IplImage *img)
void
camera_calibrate_run (GstCameraCalibrate * calib, IplImage * img)
{
cv::Mat view = cv::cvarrToMat(img);
cv::Mat view = cv::cvarrToMat (img);
// For camera only take new samples after delay time
if (calib->mode == CAPTURING) {
// get_input
cv::Size imageSize = view.size();
cv::Size imageSize = view.size ();
/* find_pattern
* FIXME find ways to reduce CPU usage
@ -471,9 +489,10 @@ void camera_calibrate_run(GstCameraCalibrate *calib, IplImage *img)
* in a separate element that gets composited back into the main stream
* (video is tee-d into it and can then be decimated, scaled, etc..) */
std::vector<cv::Point2f> pointBuf;
std::vector < cv::Point2f > pointBuf;
bool found;
int chessBoardFlags = cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_NORMALIZE_IMAGE;
int chessBoardFlags =
cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_NORMALIZE_IMAGE;
if (!calib->useFisheye) {
/* fast check erroneously fails with high distortions like fisheye */
@ -481,15 +500,19 @@ void camera_calibrate_run(GstCameraCalibrate *calib, IplImage *img)
}
/* Find feature points on the input format */
switch(calib->calibrationPattern) {
switch (calib->calibrationPattern) {
case GST_CAMERA_CALIBRATION_PATTERN_CHESSBOARD:
found = cv::findChessboardCorners(view, calib->boardSize, pointBuf, chessBoardFlags);
found =
cv::findChessboardCorners (view, calib->boardSize, pointBuf,
chessBoardFlags);
break;
case GST_CAMERA_CALIBRATION_PATTERN_CIRCLES_GRID:
found = cv::findCirclesGrid(view, calib->boardSize, pointBuf);
found = cv::findCirclesGrid (view, calib->boardSize, pointBuf);
break;
case GST_CAMERA_CALIBRATION_PATTERN_ASYMMETRIC_CIRCLES_GRID:
found = cv::findCirclesGrid(view, calib->boardSize, pointBuf, cv::CALIB_CB_ASYMMETRIC_GRID );
found =
cv::findCirclesGrid (view, calib->boardSize, pointBuf,
cv::CALIB_CB_ASYMMETRIC_GRID);
break;
default:
found = FALSE;
@ -499,33 +522,41 @@ void camera_calibrate_run(GstCameraCalibrate *calib, IplImage *img)
bool blinkOutput = FALSE;
if (found) {
/* improve the found corners' coordinate accuracy for chessboard */
if (calib->calibrationPattern == GST_CAMERA_CALIBRATION_PATTERN_CHESSBOARD && calib->cornerSubPix) {
if (calib->calibrationPattern == GST_CAMERA_CALIBRATION_PATTERN_CHESSBOARD
&& calib->cornerSubPix) {
/* FIXME findChessboardCorners and alike do a cv::COLOR_BGR2GRAY (and a histogram balance)
* the color convert should be done once (if needed) and shared
* FIXME keep viewGray around to avoid reallocating it each time... */
cv::Mat viewGray;
cv::cvtColor(view, viewGray, cv::COLOR_BGR2GRAY);
cv::cornerSubPix(viewGray, pointBuf, cv::Size(11, 11), cv::Size(-1, -1),
cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 30, 0.1));
cv::cvtColor (view, viewGray, cv::COLOR_BGR2GRAY);
cv::cornerSubPix (viewGray, pointBuf, cv::Size (11, 11), cv::Size (-1,
-1),
cv::TermCriteria (cv::TermCriteria::EPS + cv::TermCriteria::COUNT,
30, 0.1));
}
/* take new samples after delay time */
if ((calib->mode == CAPTURING) && ((clock() - calib->prevTimestamp) > calib->delay * 1e-3 * CLOCKS_PER_SEC)) {
calib->imagePoints.push_back(pointBuf);
calib->prevTimestamp = clock();
if ((calib->mode == CAPTURING)
&& ((clock () - calib->prevTimestamp) >
calib->delay * 1e-3 * CLOCKS_PER_SEC)) {
calib->imagePoints.push_back (pointBuf);
calib->prevTimestamp = clock ();
blinkOutput = true;
}
/* draw the corners */
if (calib->showCorners) {
cv::drawChessboardCorners(view, calib->boardSize, cv::Mat(pointBuf), found);
cv::drawChessboardCorners (view, calib->boardSize, cv::Mat (pointBuf),
found);
}
}
/* if got enough frames then stop calibration and show result */
if (calib->mode == CAPTURING && calib->imagePoints.size() >= (size_t)calib->nrFrames) {
if (calib->mode == CAPTURING
&& calib->imagePoints.size () >= (size_t) calib->nrFrames) {
if (camera_calibrate_calibrate(calib, imageSize, calib->cameraMatrix, calib->distCoeffs, calib->imagePoints)) {
if (camera_calibrate_calibrate (calib, imageSize, calib->cameraMatrix,
calib->distCoeffs, calib->imagePoints)) {
calib->mode = CALIBRATED;
GstPad *sink_pad = GST_BASE_TRANSFORM_SINK_PAD (calib);
@ -535,21 +566,27 @@ void camera_calibrate_run(GstCameraCalibrate *calib, IplImage *img)
/* set settings property */
g_free (calib->settings);
calib->settings = camera_serialize_undistort_settings(calib->cameraMatrix, calib->distCoeffs);
calib->settings =
camera_serialize_undistort_settings (calib->cameraMatrix,
calib->distCoeffs);
/* create calibrated event and send upstream and downstream */
sink_event = gst_camera_event_new_calibrated (calib->settings);
GST_LOG_OBJECT (sink_pad, "Sending upstream event %s.", GST_EVENT_TYPE_NAME (sink_event));
GST_LOG_OBJECT (sink_pad, "Sending upstream event %s.",
GST_EVENT_TYPE_NAME (sink_event));
if (!gst_pad_push_event (sink_pad, sink_event)) {
GST_WARNING_OBJECT (sink_pad, "Sending upstream event %p (%s) failed.",
sink_event, GST_EVENT_TYPE_NAME (sink_event));
GST_WARNING_OBJECT (sink_pad,
"Sending upstream event %p (%s) failed.", sink_event,
GST_EVENT_TYPE_NAME (sink_event));
}
src_event = gst_camera_event_new_calibrated (calib->settings);
GST_LOG_OBJECT (src_pad, "Sending downstream event %s.", GST_EVENT_TYPE_NAME (src_event));
GST_LOG_OBJECT (src_pad, "Sending downstream event %s.",
GST_EVENT_TYPE_NAME (src_event));
if (!gst_pad_push_event (src_pad, src_event)) {
GST_WARNING_OBJECT (src_pad, "Sending downstream event %p (%s) failed.",
src_event, GST_EVENT_TYPE_NAME (src_event));
GST_WARNING_OBJECT (src_pad,
"Sending downstream event %p (%s) failed.", src_event,
GST_EVENT_TYPE_NAME (src_event));
}
} else {
/* failed to calibrate, go back to detection mode */
@ -558,7 +595,7 @@ void camera_calibrate_run(GstCameraCalibrate *calib, IplImage *img)
}
if (calib->mode == CAPTURING && blinkOutput) {
bitwise_not(view, view);
bitwise_not (view, view);
}
}
@ -572,141 +609,146 @@ void camera_calibrate_run(GstCameraCalibrate *calib, IplImage *img)
std::string msg = (calib->mode == CAPTURING) ? "100/100" :
(calib->mode == CALIBRATED) ? "Calibrated" : "Waiting...";
int baseLine = 0;
cv::Size textSize = cv::getTextSize(msg, 1, 1, 1, &baseLine);
cv::Point textOrigin(view.cols - 2 * textSize.width - 10, view.rows - 2 * baseLine - 10);
cv::Size textSize = cv::getTextSize (msg, 1, 1, 1, &baseLine);
cv::Point textOrigin (view.cols - 2 * textSize.width - 10,
view.rows - 2 * baseLine - 10);
if (calib->mode == CAPTURING) {
msg = cv::format("%d/%d", (int)calib->imagePoints.size(), calib->nrFrames);
msg =
cv::format ("%d/%d", (int) calib->imagePoints.size (), calib->nrFrames);
}
const cv::Scalar RED(0,0,255);
const cv::Scalar GREEN(0,255,0);
const cv::Scalar RED (0, 0, 255);
const cv::Scalar GREEN (0, 255, 0);
cv::putText(view, msg, textOrigin, 1, 1, calib->mode == CALIBRATED ? GREEN : RED);
cv::putText (view, msg, textOrigin, 1, 1,
calib->mode == CALIBRATED ? GREEN : RED);
}
static double camera_calibrate_calc_reprojection_errors (
const std::vector<std::vector<cv::Point3f> >& objectPoints,
const std::vector<std::vector<cv::Point2f> >& imagePoints,
const std::vector<cv::Mat>& rvecs, const std::vector<cv::Mat>& tvecs,
const cv::Mat& cameraMatrix , const cv::Mat& distCoeffs,
std::vector<float>& perViewErrors, bool fisheye)
static double
camera_calibrate_calc_reprojection_errors (const std::vector < std::vector <
cv::Point3f > >&objectPoints,
const std::vector < std::vector < cv::Point2f > >&imagePoints,
const std::vector < cv::Mat > &rvecs, const std::vector < cv::Mat > &tvecs,
const cv::Mat & cameraMatrix, const cv::Mat & distCoeffs,
std::vector < float >&perViewErrors, bool fisheye)
{
std::vector<cv::Point2f> imagePoints2;
std::vector < cv::Point2f > imagePoints2;
size_t totalPoints = 0;
double totalErr = 0, err;
perViewErrors.resize(objectPoints.size());
perViewErrors.resize (objectPoints.size ());
for(size_t i = 0; i < objectPoints.size(); ++i)
{
if (fisheye)
{
cv::fisheye::projectPoints(objectPoints[i], imagePoints2,
for (size_t i = 0; i < objectPoints.size (); ++i) {
if (fisheye) {
cv::fisheye::projectPoints (objectPoints[i], imagePoints2,
rvecs[i], tvecs[i], cameraMatrix, distCoeffs);
}
else
{
cv::projectPoints(objectPoints[i], rvecs[i], tvecs[i],
} else {
cv::projectPoints (objectPoints[i], rvecs[i], tvecs[i],
cameraMatrix, distCoeffs, imagePoints2);
}
err = cv::norm(imagePoints[i], imagePoints2, cv::NORM_L2);
err = cv::norm (imagePoints[i], imagePoints2, cv::NORM_L2);
size_t n = objectPoints[i].size();
perViewErrors[i] = (float) std::sqrt(err*err/n);
totalErr += err*err;
totalPoints += n;
size_t n = objectPoints[i].size ();
perViewErrors[i] = (float) std::sqrt (err * err / n);
totalErr += err * err;
totalPoints += n;
}
return std::sqrt(totalErr/totalPoints);
return std::sqrt (totalErr / totalPoints);
}
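
The bookkeeping above is the standard RMS reprojection error: cv::norm (imagePoints[i], imagePoints2, cv::NORM_L2) already returns the square root of the summed squared point distances for one view. With n_i points in view i, detected corners x_{ik} and reprojections \hat{x}_{ik}, the function therefore computes

  e_i = \sqrt{\frac{1}{n_i} \sum_{k=1}^{n_i} \lVert x_{ik} - \hat{x}_{ik} \rVert^2},
  \qquad
  e_{\mathrm{total}} = \sqrt{\frac{\sum_i \sum_k \lVert x_{ik} - \hat{x}_{ik} \rVert^2}{\sum_i n_i}}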
static void camera_calibrate_calc_corners (cv::Size boardSize, float squareSize,
std::vector<cv::Point3f>& corners, gint patternType /*= CHESSBOARD*/)
static void
camera_calibrate_calc_corners (cv::Size boardSize, float squareSize,
std::vector < cv::Point3f > &corners, gint patternType /*= CHESSBOARD*/ )
{
corners.clear();
corners.clear ();
switch(patternType) {
switch (patternType) {
case GST_CAMERA_CALIBRATION_PATTERN_CHESSBOARD:
case GST_CAMERA_CALIBRATION_PATTERN_CIRCLES_GRID:
for( int i = 0; i < boardSize.height; ++i)
for( int j = 0; j < boardSize.width; ++j)
corners.push_back(cv::Point3f(j * squareSize, i * squareSize, 0));
for (int i = 0; i < boardSize.height; ++i)
for (int j = 0; j < boardSize.width; ++j)
corners.push_back (cv::Point3f (j * squareSize, i * squareSize, 0));
break;
case GST_CAMERA_CALIBRATION_PATTERN_ASYMMETRIC_CIRCLES_GRID:
for( int i = 0; i < boardSize.height; i++)
for( int j = 0; j < boardSize.width; j++)
corners.push_back(cv::Point3f((2 * j + i % 2) * squareSize, i * squareSize, 0));
for (int i = 0; i < boardSize.height; i++)
for (int j = 0; j < boardSize.width; j++)
corners.push_back (cv::Point3f ((2 * j + i % 2) * squareSize,
i * squareSize, 0));
break;
default:
break;
}
}
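
One detail worth spelling out in the corner generator above: for the asymmetric circles grid, every odd row is shifted by one square, so with square size s the object point for row i, column j is

  P_{ij} = \left( \left( 2j + (i \bmod 2) \right) s,\; i\,s,\; 0 \right)

which is exactly the (2 * j + i % 2) * squareSize expression in the patch; the chessboard and symmetric grid cases use the plain (j\,s, i\,s, 0) lattice.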
static bool camera_calibrate_calibrate_full(GstCameraCalibrate *calib,
cv::Size& imageSize, cv::Mat& cameraMatrix, cv::Mat& distCoeffs,
std::vector<std::vector<cv::Point2f> > imagePoints,
std::vector<cv::Mat>& rvecs, std::vector<cv::Mat>& tvecs,
std::vector<float>& reprojErrs, double& totalAvgErr)
static bool
camera_calibrate_calibrate_full (GstCameraCalibrate * calib,
cv::Size & imageSize, cv::Mat & cameraMatrix, cv::Mat & distCoeffs,
std::vector < std::vector < cv::Point2f > >imagePoints,
std::vector < cv::Mat > &rvecs, std::vector < cv::Mat > &tvecs,
std::vector < float >&reprojErrs, double &totalAvgErr)
{
cameraMatrix = cv::Mat::eye(3, 3, CV_64F);
cameraMatrix = cv::Mat::eye (3, 3, CV_64F);
if (calib->flags & cv::CALIB_FIX_ASPECT_RATIO) {
cameraMatrix.at<double>(0,0) = calib->aspectRatio;
cameraMatrix.at < double >(0, 0) = calib->aspectRatio;
}
if (calib->useFisheye) {
distCoeffs = cv::Mat::zeros(4, 1, CV_64F);
distCoeffs = cv::Mat::zeros (4, 1, CV_64F);
} else {
distCoeffs = cv::Mat::zeros(8, 1, CV_64F);
distCoeffs = cv::Mat::zeros (8, 1, CV_64F);
}
std::vector<std::vector<cv::Point3f> > objectPoints(1);
std::vector < std::vector < cv::Point3f > >objectPoints (1);
camera_calibrate_calc_corners (calib->boardSize, calib->squareSize,
objectPoints[0], calib->calibrationPattern);
objectPoints.resize(imagePoints.size(), objectPoints[0]);
objectPoints.resize (imagePoints.size (), objectPoints[0]);
/* Find intrinsic and extrinsic camera parameters */
double rms;
if (calib->useFisheye) {
cv::Mat _rvecs, _tvecs;
rms = cv::fisheye::calibrate(objectPoints, imagePoints, imageSize,
rms = cv::fisheye::calibrate (objectPoints, imagePoints, imageSize,
cameraMatrix, distCoeffs, _rvecs, _tvecs, calib->flags);
rvecs.reserve(_rvecs.rows);
tvecs.reserve(_tvecs.rows);
for(int i = 0; i < int(objectPoints.size()); i++){
rvecs.push_back(_rvecs.row(i));
tvecs.push_back(_tvecs.row(i));
rvecs.reserve (_rvecs.rows);
tvecs.reserve (_tvecs.rows);
for (int i = 0; i < int (objectPoints.size ()); i++) {
rvecs.push_back (_rvecs.row (i));
tvecs.push_back (_tvecs.row (i));
}
} else {
rms = cv::calibrateCamera(objectPoints, imagePoints, imageSize,
rms = cv::calibrateCamera (objectPoints, imagePoints, imageSize,
cameraMatrix, distCoeffs, rvecs, tvecs, calib->flags);
}
GST_LOG_OBJECT (calib,
"Re-projection error reported by calibrateCamera: %f", rms);
bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);
bool ok = checkRange (cameraMatrix) && checkRange (distCoeffs);
totalAvgErr = camera_calibrate_calc_reprojection_errors (objectPoints, imagePoints,
totalAvgErr =
camera_calibrate_calc_reprojection_errors (objectPoints, imagePoints,
rvecs, tvecs, cameraMatrix, distCoeffs, reprojErrs, calib->useFisheye);
return ok;
}
bool camera_calibrate_calibrate(GstCameraCalibrate *calib,
cv::Size imageSize, cv::Mat& cameraMatrix, cv::Mat& distCoeffs,
std::vector<std::vector<cv::Point2f> > imagePoints)
bool
camera_calibrate_calibrate (GstCameraCalibrate * calib,
cv::Size imageSize, cv::Mat & cameraMatrix, cv::Mat & distCoeffs,
std::vector < std::vector < cv::Point2f > >imagePoints)
{
std::vector<cv::Mat> rvecs, tvecs;
std::vector<float> reprojErrs;
std::vector < cv::Mat > rvecs, tvecs;
std::vector < float >reprojErrs;
double totalAvgErr = 0;
bool ok = camera_calibrate_calibrate_full(calib,
bool ok = camera_calibrate_calibrate_full (calib,
imageSize, cameraMatrix, distCoeffs, imagePoints,
rvecs, tvecs, reprojErrs, totalAvgErr);
GST_LOG_OBJECT (calib, (ok ? "Calibration succeeded" : "Calibration failed"));
/* + ". avg re projection error = " + totalAvgErr);*/
/* + ". avg re projection error = " + totalAvgErr); */
return ok;
}
@ -720,8 +762,7 @@ gst_camera_calibrate_plugin_init (GstPlugin * plugin)
{
/* debug category for filtering log messages */
GST_DEBUG_CATEGORY_INIT (gst_camera_calibrate_debug, "cameracalibrate",
0,
"Performs camera calibration");
0, "Performs camera calibration");
return gst_element_register (plugin, "cameracalibrate", GST_RANK_NONE,
GST_TYPE_CAMERA_CALIBRATE);

View file

@ -98,7 +98,8 @@ enum
PROP_SETTINGS
};
G_DEFINE_TYPE (GstCameraUndistort, gst_camera_undistort, GST_TYPE_OPENCV_VIDEO_FILTER);
G_DEFINE_TYPE (GstCameraUndistort, gst_camera_undistort,
GST_TYPE_OPENCV_VIDEO_FILTER);
static void gst_camera_undistort_dispose (GObject * object);
static void gst_camera_undistort_set_property (GObject * object, guint prop_id,
@ -109,16 +110,19 @@ static void gst_camera_undistort_get_property (GObject * object, guint prop_id,
static gboolean gst_camera_undistort_set_info (GstOpencvVideoFilter * cvfilter,
gint in_width, gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels);
static GstFlowReturn gst_camera_undistort_transform_frame (
GstOpencvVideoFilter * cvfilter,
GstBuffer * frame, IplImage * img,
GstBuffer * outframe, IplImage * outimg);
static GstFlowReturn gst_camera_undistort_transform_frame (GstOpencvVideoFilter
* cvfilter, GstBuffer * frame, IplImage * img, GstBuffer * outframe,
IplImage * outimg);
static gboolean gst_camera_undistort_sink_event (GstBaseTransform *trans, GstEvent *event);
static gboolean gst_camera_undistort_src_event (GstBaseTransform *trans, GstEvent *event);
static gboolean gst_camera_undistort_sink_event (GstBaseTransform * trans,
GstEvent * event);
static gboolean gst_camera_undistort_src_event (GstBaseTransform * trans,
GstEvent * event);
static void camera_undistort_run(GstCameraUndistort *undist, IplImage *img, IplImage *outimg);
static gboolean camera_undistort_init_undistort_rectify_map(GstCameraUndistort *undist);
static void camera_undistort_run (GstCameraUndistort * undist, IplImage * img,
IplImage * outimg);
static gboolean camera_undistort_init_undistort_rectify_map (GstCameraUndistort
* undist);
/* initialize the cameraundistort's class */
static void
@ -127,7 +131,8 @@ gst_camera_undistort_class_init (GstCameraUndistortClass * klass)
GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
GstBaseTransformClass *trans_class = GST_BASE_TRANSFORM_CLASS (klass);
GstOpencvVideoFilterClass *opencvfilter_class = GST_OPENCV_VIDEO_FILTER_CLASS (klass);
GstOpencvVideoFilterClass *opencvfilter_class =
GST_OPENCV_VIDEO_FILTER_CLASS (klass);
GstCaps *caps;
GstPadTemplate *templ;
@ -136,19 +141,17 @@ gst_camera_undistort_class_init (GstCameraUndistortClass * klass)
gobject_class->set_property = gst_camera_undistort_set_property;
gobject_class->get_property = gst_camera_undistort_get_property;
trans_class->sink_event =
GST_DEBUG_FUNCPTR (gst_camera_undistort_sink_event);
trans_class->src_event =
GST_DEBUG_FUNCPTR (gst_camera_undistort_src_event);
trans_class->sink_event = GST_DEBUG_FUNCPTR (gst_camera_undistort_sink_event);
trans_class->src_event = GST_DEBUG_FUNCPTR (gst_camera_undistort_src_event);
opencvfilter_class->cv_set_caps = gst_camera_undistort_set_info;
opencvfilter_class->cv_trans_func =
gst_camera_undistort_transform_frame;
opencvfilter_class->cv_trans_func = gst_camera_undistort_transform_frame;
g_object_class_install_property (gobject_class, PROP_SHOW_UNDISTORTED,
g_param_spec_boolean ("undistort", "Apply camera corrections",
"Apply camera corrections",
DEFAULT_SHOW_UNDISTORTED, (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
DEFAULT_SHOW_UNDISTORTED,
(GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property (gobject_class, PROP_ALPHA,
g_param_spec_float ("alpha", "Pixels",
@ -164,8 +167,7 @@ gst_camera_undistort_class_init (GstCameraUndistortClass * klass)
gst_element_class_set_static_metadata (element_class,
"cameraundistort",
"Filter/Effect/Video",
"Performs camera undistort",
"Philippe Renon <philippe_renon@yahoo.fr>");
"Performs camera undistort", "Philippe Renon <philippe_renon@yahoo.fr>");
/* add sink and source pad templates */
caps = gst_opencv_caps_from_cv_image_type (CV_16UC1);
@ -238,7 +240,7 @@ gst_camera_undistort_set_property (GObject * object, guint prop_id,
}
str = g_value_get_string (value);
if (str)
undist->settings = g_strdup (str);
undist->settings = g_strdup (str);
undist->settingsChanged = TRUE;
break;
default:
@ -275,9 +277,12 @@ gst_camera_undistort_get_property (GObject * object, guint prop_id,
gboolean
gst_camera_undistort_set_info (GstOpencvVideoFilter * cvfilter,
gint in_width, gint in_height,
__attribute__((unused)) gint in_depth, __attribute__((unused)) gint in_channels,
__attribute__((unused)) gint out_width, __attribute__((unused)) gint out_height,
__attribute__((unused)) gint out_depth, __attribute__((unused)) gint out_channels)
__attribute__((unused)) gint in_depth,
__attribute__((unused)) gint in_channels,
__attribute__((unused)) gint out_width,
__attribute__((unused)) gint out_height,
__attribute__((unused)) gint out_depth,
__attribute__((unused)) gint out_channels)
{
GstCameraUndistort *undist = GST_CAMERA_UNDISTORT (cvfilter);
@ -302,7 +307,8 @@ gst_camera_undistort_transform_frame (GstOpencvVideoFilter * cvfilter,
}
static void
camera_undistort_run (GstCameraUndistort *undist, IplImage *img, IplImage *outimg)
camera_undistort_run (GstCameraUndistort * undist, IplImage * img,
IplImage * outimg)
{
const cv::Mat view = cv::cvarrToMat (img);
cv::Mat outview = cv::cvarrToMat (outimg);
@ -313,9 +319,10 @@ camera_undistort_run (GstCameraUndistort *undist, IplImage *img, IplImage *outim
undist->settingsChanged = FALSE;
undist->doUndistort = FALSE;
if (undist->showUndistorted && undist->settings) {
if (camera_deserialize_undistort_settings (
undist->settings, undist->cameraMatrix, undist->distCoeffs)) {
undist->doUndistort = camera_undistort_init_undistort_rectify_map (undist);
if (camera_deserialize_undistort_settings (undist->settings,
undist->cameraMatrix, undist->distCoeffs)) {
undist->doUndistort =
camera_undistort_init_undistort_rectify_map (undist);
}
}
}
@ -329,37 +336,38 @@ camera_undistort_run (GstCameraUndistort *undist, IplImage *img, IplImage *outim
const cv::Scalar CROP_COLOR (0, 255, 0);
cv::rectangle (outview, undist->validPixROI, CROP_COLOR);
}
}
else {
/* FIXME should use pass through to avoid this copy when not undistorting */
view.copyTo (outview);
} else {
/* FIXME should use pass through to avoid this copy when not undistorting */
view.copyTo (outview);
}
}
/* compute undistort */
static gboolean
camera_undistort_init_undistort_rectify_map (GstCameraUndistort *undist)
camera_undistort_init_undistort_rectify_map (GstCameraUndistort * undist)
{
cv::Size newImageSize;
cv::Rect validPixROI;
cv::Mat newCameraMatrix = cv::getOptimalNewCameraMatrix (
undist->cameraMatrix, undist->distCoeffs, undist->imageSize,
undist->alpha, newImageSize, &validPixROI);
cv::Mat newCameraMatrix =
cv::getOptimalNewCameraMatrix (undist->cameraMatrix, undist->distCoeffs,
undist->imageSize, undist->alpha, newImageSize, &validPixROI);
undist->validPixROI = validPixROI;
cv::initUndistortRectifyMap (undist->cameraMatrix, undist->distCoeffs, cv::Mat(),
newCameraMatrix, undist->imageSize, CV_16SC2, undist->map1, undist->map2);
cv::initUndistortRectifyMap (undist->cameraMatrix, undist->distCoeffs,
cv::Mat (), newCameraMatrix, undist->imageSize, CV_16SC2, undist->map1,
undist->map2);
return TRUE;
}
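
The function above only precomputes the two rectify maps; the hunks shown here never include the per-frame application, which in OpenCV is a single remap call. A hedged sketch of how such maps are typically consumed, using the view/outview Mats from camera_undistort_run earlier:

  /* one table lookup per pixel via the precomputed CV_16SC2 maps */
  cv::remap (view, outview, undist->map1, undist->map2, cv::INTER_LINEAR);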
static gboolean
camera_undistort_calibration_event (GstCameraUndistort *undist, GstEvent *event)
camera_undistort_calibration_event (GstCameraUndistort * undist,
GstEvent * event)
{
g_free (undist->settings);
if (!gst_camera_event_parse_calibrated (event, &(undist->settings))) {
return FALSE;
return FALSE;
}
undist->settingsChanged = TRUE;
@ -368,35 +376,41 @@ camera_undistort_calibration_event (GstCameraUndistort *undist, GstEvent *event)
}
static gboolean
gst_camera_undistort_sink_event (GstBaseTransform *trans, GstEvent *event)
gst_camera_undistort_sink_event (GstBaseTransform * trans, GstEvent * event)
{
GstCameraUndistort *undist = GST_CAMERA_UNDISTORT (trans);
const GstStructure *structure = gst_event_get_structure (event);
if (GST_EVENT_TYPE (event) == GST_EVENT_CUSTOM_BOTH && structure) {
if (strcmp (gst_structure_get_name (structure), GST_CAMERA_EVENT_CALIBRATED_NAME) == 0) {
if (strcmp (gst_structure_get_name (structure),
GST_CAMERA_EVENT_CALIBRATED_NAME) == 0) {
return camera_undistort_calibration_event (undist, event);
}
}
return GST_BASE_TRANSFORM_CLASS (gst_camera_undistort_parent_class)->sink_event (trans, event);
return
GST_BASE_TRANSFORM_CLASS (gst_camera_undistort_parent_class)->sink_event
(trans, event);
}
static gboolean
gst_camera_undistort_src_event (GstBaseTransform *trans, GstEvent *event)
gst_camera_undistort_src_event (GstBaseTransform * trans, GstEvent * event)
{
GstCameraUndistort *undist = GST_CAMERA_UNDISTORT (trans);
const GstStructure *structure = gst_event_get_structure (event);
if (GST_EVENT_TYPE (event) == GST_EVENT_CUSTOM_BOTH && structure) {
if (strcmp (gst_structure_get_name (structure), GST_CAMERA_EVENT_CALIBRATED_NAME) == 0) {
if (strcmp (gst_structure_get_name (structure),
GST_CAMERA_EVENT_CALIBRATED_NAME) == 0) {
return camera_undistort_calibration_event (undist, event);
}
}
return GST_BASE_TRANSFORM_CLASS (gst_camera_undistort_parent_class)->src_event (trans, event);
return
GST_BASE_TRANSFORM_CLASS (gst_camera_undistort_parent_class)->src_event
(trans, event);
}
/* entry point to initialize the plug-in
@ -408,8 +422,7 @@ gst_camera_undistort_plugin_init (GstPlugin * plugin)
{
/* debug category for filtering log messages */
GST_DEBUG_CATEGORY_INIT (gst_camera_undistort_debug, "cameraundistort",
0,
"Performs camera undistortion");
0, "Performs camera undistortion");
return gst_element_register (plugin, "cameraundistort", GST_RANK_NONE,
GST_TYPE_CAMERA_UNDISTORT);

View file

@ -146,19 +146,22 @@ gst_cv_laplace_class_init (GstCvLaplaceClass * klass)
g_object_class_install_property (gobject_class, PROP_APERTURE_SIZE,
g_param_spec_int ("aperture-size", "aperture size",
"Size of the extended Laplace Kernel (1, 3, 5 or 7)", 1, 7,
DEFAULT_APERTURE_SIZE, (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
DEFAULT_APERTURE_SIZE,
(GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property (gobject_class, PROP_SCALE,
g_param_spec_double ("scale", "scale factor",
"Scale factor", 0.0, G_MAXDOUBLE,
DEFAULT_SCALE_FACTOR, (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_param_spec_double ("scale", "scale factor", "Scale factor", 0.0,
G_MAXDOUBLE, DEFAULT_SCALE_FACTOR,
(GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property (gobject_class, PROP_SHIFT,
g_param_spec_double ("shift", "Shift",
"Value added to the scaled source array elements", 0.0, G_MAXDOUBLE,
DEFAULT_SHIFT, (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
DEFAULT_SHIFT,
(GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property (gobject_class, PROP_MASK,
g_param_spec_boolean ("mask", "Mask",
"Sets whether the detected edges should be used as a mask on the original input or not",
DEFAULT_MASK, (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
DEFAULT_MASK,
(GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
gst_element_class_add_static_pad_template (element_class, &src_factory);
gst_element_class_add_static_pad_template (element_class, &sink_factory);
@ -190,15 +193,17 @@ gst_cv_laplace_cv_set_caps (GstOpencvVideoFilter * trans, gint in_width,
GstCvLaplace *filter = GST_CV_LAPLACE (trans);
if (filter->intermediary_img != NULL) {
cvReleaseImage (&filter->intermediary_img);
cvReleaseImage (&filter->cvGray);
cvReleaseImage (&filter->Laplace);
cvReleaseImage (&filter->intermediary_img);
cvReleaseImage (&filter->cvGray);
cvReleaseImage (&filter->Laplace);
}
filter->intermediary_img =
cvCreateImage (cvSize (out_width, out_height), IPL_DEPTH_16S, 1);
filter->cvGray = cvCreateImage (cvSize (in_width, in_height), IPL_DEPTH_8U, 1);
filter->Laplace = cvCreateImage (cvSize (in_width, in_height), IPL_DEPTH_8U, 1);
filter->cvGray =
cvCreateImage (cvSize (in_width, in_height), IPL_DEPTH_8U, 1);
filter->Laplace =
cvCreateImage (cvSize (in_width, in_height), IPL_DEPTH_8U, 1);
return TRUE;
}

View file

@ -371,11 +371,11 @@ gst_cv_smooth_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
IplImage * img)
{
GstCvSmooth *filter = GST_CV_SMOOTH (base);
Mat mat = cvarrToMat(img);
Mat mat = cvarrToMat (img);
if (filter->positionx != 0 || filter->positiony != 0 ||
filter->width != G_MAXINT || filter->height != G_MAXINT) {
Size mat_size = mat.size();
Size mat_size = mat.size ();
/* if the effect would start outside the image, just skip it */
if (filter->positionx >= mat_size.width
@ -385,12 +385,12 @@ gst_cv_smooth_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
if (filter->width <= 0 || filter->height <= 0)
return GST_FLOW_OK;
Rect mat_rect(filter->positionx,
Rect mat_rect (filter->positionx,
filter->positiony,
MIN(filter->width, mat_size.width - filter->positionx),
MIN(filter->height, mat_size.height - filter->positiony));
MIN (filter->width, mat_size.width - filter->positionx),
MIN (filter->height, mat_size.height - filter->positiony));
mat = mat(mat_rect);
mat = mat (mat_rect);
}
switch (filter->type) {

View file

@ -158,7 +158,8 @@ gst_cv_sobel_class_init (GstCvSobelClass * klass)
g_object_class_install_property (gobject_class, PROP_MASK,
g_param_spec_boolean ("mask", "Mask",
"Sets whether the detected derivative edges should be used as a mask on the original input or not",
DEFAULT_MASK, (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
DEFAULT_MASK,
(GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
gst_element_class_add_static_pad_template (element_class, &src_factory);
gst_element_class_add_static_pad_template (element_class, &sink_factory);
@ -191,12 +192,14 @@ gst_cv_sobel_set_caps (GstOpencvVideoFilter * transform,
GstCvSobel *filter = GST_CV_SOBEL (transform);
if (filter->cvSobel != NULL) {
cvReleaseImage (&filter->cvGray);
cvReleaseImage (&filter->cvSobel);
cvReleaseImage (&filter->cvGray);
cvReleaseImage (&filter->cvSobel);
}
filter->cvGray = cvCreateImage (cvSize (in_width, in_height), IPL_DEPTH_8U, 1);
filter->cvSobel = cvCreateImage (cvSize (out_width, out_height), IPL_DEPTH_8U, 1);
filter->cvGray =
cvCreateImage (cvSize (in_width, in_height), IPL_DEPTH_8U, 1);
filter->cvSobel =
cvCreateImage (cvSize (out_width, out_height), IPL_DEPTH_8U, 1);
return TRUE;
}

View file

@ -644,8 +644,8 @@ initialise_sbm (GstDisparity * filter)
filter->depth_map_as_cvMat =
(void *) new Mat (cvarrToMat (filter->cvGray_depth_map1, false));
filter->sbm = StereoBM::create();
filter->sgbm = StereoSGBM::create(1,64,3);
filter->sbm = StereoBM::create ();
filter->sgbm = StereoSGBM::create (1, 64, 3);
filter->sbm->setBlockSize (9);
filter->sbm->setNumDisparities (32);
@ -676,8 +676,8 @@ initialise_sbm (GstDisparity * filter)
int
run_sbm_iteration (GstDisparity * filter)
{
((StereoBM *) filter->
sbm)->compute (*((Mat *) filter->img_left_as_cvMat_gray),
((StereoBM *) filter->sbm)->
compute (*((Mat *) filter->img_left_as_cvMat_gray),
*((Mat *) filter->img_right_as_cvMat_gray),
*((Mat *) filter->depth_map_as_cvMat));
@ -687,8 +687,8 @@ run_sbm_iteration (GstDisparity * filter)
int
run_sgbm_iteration (GstDisparity * filter)
{
((StereoSGBM *) filter->
sgbm)->compute (*((Mat *) filter->img_left_as_cvMat_gray),
((StereoSGBM *) filter->sgbm)->
compute (*((Mat *) filter->img_left_as_cvMat_gray),
*((Mat *) filter->img_right_as_cvMat_gray),
*((Mat *) filter->depth_map_as_cvMat));
@ -702,8 +702,8 @@ finalise_sbm (GstDisparity * filter)
delete (Mat *) filter->img_left_as_cvMat_gray;
delete (Mat *) filter->img_right_as_cvMat_gray;
filter->sbm.release();
filter->sgbm.release();
filter->sbm.release ();
filter->sgbm.release ();
return (0);
}
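
For reference, the StereoBM/StereoSGBM calls being reindented above follow the usual OpenCV 3.x matcher pattern: create, tune, then compute a disparity map. A minimal standalone sketch (left and right are assumed to be rectified 8-bit single-channel images; this is not code from the commit):

  cv::Ptr<cv::StereoBM> sbm = cv::StereoBM::create ();
  sbm->setBlockSize (9);          /* same tuning as initialise_sbm */
  sbm->setNumDisparities (32);
  cv::Mat disparity;              /* CV_16S, fixed-point (disparity * 16) */
  sbm->compute (left, right, disparity);

StereoSGBM::create (1, 64, 3) in the hunk above takes (minDisparity, numDisparities, blockSize) and is driven the same way through compute ().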

View file

@ -248,12 +248,14 @@ gst_edge_detect_set_caps (GstOpencvVideoFilter * transform,
GstEdgeDetect *filter = GST_EDGE_DETECT (transform);
if (filter->cvEdge != NULL) {
cvReleaseImage (&filter->cvGray);
cvReleaseImage (&filter->cvEdge);
cvReleaseImage (&filter->cvGray);
cvReleaseImage (&filter->cvEdge);
}
filter->cvGray = cvCreateImage (cvSize (in_width, in_height), IPL_DEPTH_8U, 1);
filter->cvEdge = cvCreateImage (cvSize (in_width, in_height), IPL_DEPTH_8U, 1);
filter->cvGray =
cvCreateImage (cvSize (in_width, in_height), IPL_DEPTH_8U, 1);
filter->cvEdge =
cvCreateImage (cvSize (in_width, in_height), IPL_DEPTH_8U, 1);
return TRUE;
}

View file

@ -355,7 +355,7 @@ gst_face_blur_transform_ip (GstOpencvVideoFilter * transform,
cvCvtColor (img, filter->cvGray, CV_RGB2GRAY);
Mat image = cvarrToMat(filter->cvGray);
Mat image = cvarrToMat (filter->cvGray);
filter->cvCascade->detectMultiScale (image, faces, filter->scale_factor,
filter->min_neighbors, filter->flags,
cvSize (filter->min_size_width, filter->min_size_height), cvSize (0, 0));
@ -364,7 +364,7 @@ gst_face_blur_transform_ip (GstOpencvVideoFilter * transform,
for (i = 0; i < faces.size (); ++i) {
Rect *r = &faces[i];
Mat imag = cvarrToMat(img);
Mat imag = cvarrToMat (img);
Mat roi (imag, Rect (r->x, r->y, r->width, r->height));
blur (roi, roi, Size (11, 11));
GaussianBlur (roi, roi, Size (11, 11), 0, 0);

View file

@ -198,8 +198,7 @@ gst_grabcut_init (GstGrabcut * filter)
{
filter->test_mode = DEFAULT_TEST_MODE;
filter->scale = DEFAULT_SCALE;
gst_opencv_video_filter_set_in_place (GST_OPENCV_VIDEO_FILTER (filter),
TRUE);
gst_opencv_video_filter_set_in_place (GST_OPENCV_VIDEO_FILTER (filter), TRUE);
}
@ -299,7 +298,7 @@ gst_grabcut_release_all_pointers (GstGrabcut * filter)
static GstFlowReturn
gst_grabcut_transform_ip (GstOpencvVideoFilter * filter, GstBuffer * buffer,
IplImage * img)
IplImage * img)
{
GstGrabcut *gc = GST_GRABCUT (filter);
gint alphapixels;
@ -312,7 +311,7 @@ gst_grabcut_transform_ip (GstOpencvVideoFilter * filter, GstBuffer * buffer,
gc->facepos.width = meta->w * gc->scale * 0.9;
gc->facepos.height = meta->h * gc->scale * 1.1;
} else {
memset (static_cast<void*>(&(gc->facepos)), 0, sizeof (gc->facepos));
memset (static_cast < void *>(&(gc->facepos)), 0, sizeof (gc->facepos));
}
/* normally input should be RGBA */
@ -401,9 +400,9 @@ int
initialise_grabcut (struct grabcut_params *GC, IplImage * image_c,
CvMat * mask_c)
{
GC->image = (void *) new Mat (cvarrToMat (image_c, false)); /* "true" refers to copydata */
GC->image = (void *) new Mat (cvarrToMat (image_c, false)); /* "true" refers to copydata */
GC->mask = (void *) new Mat (cvarrToMat (mask_c, false));
GC->bgdModel = (void *) new Mat (); /* "true" refers to copydata */
GC->bgdModel = (void *) new Mat (); /* "true" refers to copydata */
GC->fgdModel = (void *) new Mat ();
return (0);
@ -418,8 +417,7 @@ run_grabcut_iteration (struct grabcut_params *GC, IplImage * image_c,
if (cvCountNonZero (mask_c))
grabCut (*((Mat *) GC->image), *((Mat *) GC->mask), Rect (),
*((Mat *) GC->bgdModel), *((Mat *) GC->fgdModel), 1,
GC_INIT_WITH_MASK);
*((Mat *) GC->bgdModel), *((Mat *) GC->fgdModel), 1, GC_INIT_WITH_MASK);
return (0);
}
@ -431,8 +429,7 @@ run_grabcut_iteration2 (struct grabcut_params *GC, IplImage * image_c,
((Mat *) GC->image)->data = (uchar *) image_c->imageData;
((Mat *) GC->mask)->data = mask_c->data.ptr;
grabCut (*((Mat *) GC->image), *((Mat *) GC->mask), *(bbox),
*((Mat *) GC->bgdModel), *((Mat *) GC->fgdModel), 1,
GC_INIT_WITH_RECT);
*((Mat *) GC->bgdModel), *((Mat *) GC->fgdModel), 1, GC_INIT_WITH_RECT);
return (0);
}

View file

@ -191,21 +191,21 @@ gst_handdetect_class_init (GstHanddetectClass * klass)
g_param_spec_boolean ("display",
"Display",
"Whether the detected hands are highlighted in output frame",
TRUE, (GParamFlags)G_PARAM_READWRITE)
TRUE, (GParamFlags) G_PARAM_READWRITE)
);
g_object_class_install_property (gobject_class,
PROP_PROFILE_FIST,
g_param_spec_string ("profile_fist",
"Profile_fist",
"Location of HAAR cascade file (fist gesture)",
HAAR_FILE_FIST, (GParamFlags)G_PARAM_READWRITE)
HAAR_FILE_FIST, (GParamFlags) G_PARAM_READWRITE)
);
g_object_class_install_property (gobject_class,
PROP_PROFILE_PALM,
g_param_spec_string ("profile_palm",
"Profile_palm",
"Location of HAAR cascade file (palm gesture)",
HAAR_FILE_PALM, (GParamFlags)G_PARAM_READWRITE)
HAAR_FILE_PALM, (GParamFlags) G_PARAM_READWRITE)
);
/* FIXME: property name needs fixing */
g_object_class_install_property (gobject_class,
@ -213,7 +213,7 @@ gst_handdetect_class_init (GstHanddetectClass * klass)
g_param_spec_int ("ROI_X",
"ROI_X",
"X of left-top pointer in region of interest \nGestures in the defined region of interest will emit messages",
0, INT_MAX, 0, (GParamFlags)G_PARAM_READWRITE)
0, INT_MAX, 0, (GParamFlags) G_PARAM_READWRITE)
);
/* FIXME: property name needs fixing */
g_object_class_install_property (gobject_class,
@ -221,7 +221,7 @@ gst_handdetect_class_init (GstHanddetectClass * klass)
g_param_spec_int ("ROI_Y",
"ROI_Y",
"Y of left-top pointer in region of interest \nGestures in the defined region of interest will emit messages",
0, INT_MAX, 0, (GParamFlags)G_PARAM_READWRITE)
0, INT_MAX, 0, (GParamFlags) G_PARAM_READWRITE)
);
/* FIXME: property name needs fixing */
g_object_class_install_property (gobject_class,
@ -229,7 +229,7 @@ gst_handdetect_class_init (GstHanddetectClass * klass)
g_param_spec_int ("ROI_WIDTH",
"ROI_WIDTH",
"WIDTH of left-top pointer in region of interest \nGestures in the defined region of interest will emit messages",
0, INT_MAX, 0, (GParamFlags)G_PARAM_READWRITE)
0, INT_MAX, 0, (GParamFlags) G_PARAM_READWRITE)
);
/* FIXME: property name needs fixing */
g_object_class_install_property (gobject_class,
@ -237,7 +237,7 @@ gst_handdetect_class_init (GstHanddetectClass * klass)
g_param_spec_int ("ROI_HEIGHT",
"ROI_HEIGHT",
"HEIGHT of left-top pointer in region of interest \nGestures in the defined region of interest will emit messages",
0, INT_MAX, 0, (GParamFlags)G_PARAM_READWRITE)
0, INT_MAX, 0, (GParamFlags) G_PARAM_READWRITE)
);
gst_element_class_set_static_metadata (element_class,
@ -402,11 +402,11 @@ gst_handdetect_transform_ip (GstOpencvVideoFilter * transform,
/* check detection cascades */
if (filter->cvCascade_fist && filter->cvCascade_palm) {
/* cvt to gray colour space for hand detect */
/* cvt to gray colour space for hand detect */
cvCvtColor (img, filter->cvGray, CV_RGB2GRAY);
/* detect FIST gesture fist */
Mat image = cvarrToMat(filter->cvGray);
Mat image = cvarrToMat (filter->cvGray);
Mat roi (image, Rect (filter->cvGray->origin,
filter->cvGray->origin, filter->cvGray->width,
filter->cvGray->height));
@ -431,7 +431,7 @@ gst_handdetect_transform_ip (GstOpencvVideoFilter * transform,
if (filter->prev_r == NULL)
filter->prev_r = &temp_r;
/* Get the best FIST gesture */
for (i = 0; i < hands.size(); i++) {
for (i = 0; i < hands.size (); i++) {
r = &hands[i];
distance = (int) sqrt (pow ((r->x - filter->prev_r->x),
2) + pow ((r->y - filter->prev_r->y), 2));
@ -497,7 +497,7 @@ gst_handdetect_transform_ip (GstOpencvVideoFilter * transform,
cvCircle (img, center, radius, CV_RGB (0, 0, 200), 1, 8, 0);
}
} else {
/* if NO FIST gesture, detecting PALM gesture */
/* if NO FIST gesture, detecting PALM gesture */
filter->cvCascade_palm->detectMultiScale (roi, hands, 1.1, 2,
CV_HAAR_DO_CANNY_PRUNING, cvSize (24, 24), cvSize (0, 0));
/* if PALM detected */
@ -518,7 +518,7 @@ gst_handdetect_transform_ip (GstOpencvVideoFilter * transform,
min_distance = img->width + img->height;
/* Init filter->prev_r */
temp_r = Rect (0, 0, 0, 0);
if (filter->prev_r == NULL)
if (filter->prev_r == NULL)
filter->prev_r = &temp_r;
/* Get the best PALM gesture */
for (i = 0; i < hands.size (); ++i) {

View file

@ -126,8 +126,8 @@ static void gst_retinex_get_property (GObject * object, guint prop_id,
static GstFlowReturn gst_retinex_transform_ip (GstOpencvVideoFilter * filter,
GstBuffer * buff, IplImage * img);
static gboolean gst_retinex_set_caps (GstOpencvVideoFilter* btrans,
gint in_width, gint in_height, gint in_depth, gint in_channels,
static gboolean gst_retinex_set_caps (GstOpencvVideoFilter * btrans,
gint in_width, gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels);
static void gst_retinex_release_all_images (GstRetinex * filter);
@ -229,9 +229,9 @@ gst_retinex_get_property (GObject * object, guint prop_id,
}
static gboolean
gst_retinex_set_caps (GstOpencvVideoFilter * filter, gint in_width, gint in_height,
gint in_depth, gint in_channels, gint out_width, gint out_height,
gint out_depth, gint out_channels)
gst_retinex_set_caps (GstOpencvVideoFilter * filter, gint in_width,
gint in_height, gint in_depth, gint in_channels, gint out_width,
gint out_height, gint out_depth, gint out_channels)
{
GstRetinex *retinex = GST_RETINEX (filter);
CvSize size;
@ -276,14 +276,14 @@ gst_retinex_release_all_images (GstRetinex * filter)
static GstFlowReturn
gst_retinex_transform_ip (GstOpencvVideoFilter * filter, GstBuffer * buf,
IplImage * img)
IplImage * img)
{
GstRetinex *retinex = GST_RETINEX (filter);
double sigma = 14.0;
int gain = 128;
int offset = 128;
int filter_size;
Mat icvD = cvarrToMat(retinex->cvD, false);
Mat icvD = cvarrToMat (retinex->cvD, false);
/* Basic retinex restoration. The image and a filtered image are converted
to the log domain and subtracted.
@ -306,8 +306,7 @@ gst_retinex_transform_ip (GstOpencvVideoFilter * filter, GstBuffer * buf,
cvSub (retinex->cvB, retinex->cvC, retinex->cvA, NULL);
/* Restore */
cvConvertScale (retinex->cvA, img, (float) gain,
(float) offset);
cvConvertScale (retinex->cvA, img, (float) gain, (float) offset);
}
/* Multiscale retinex restoration. The image and a set of filtered images are
converted to the log domain and subtracted from the original with some set
@ -353,8 +352,7 @@ gst_retinex_transform_ip (GstOpencvVideoFilter * filter, GstBuffer * buf,
}
/* Restore */
cvConvertScale (retinex->cvB, img, (float) gain,
(float) offset);
cvConvertScale (retinex->cvB, img, (float) gain, (float) offset);
}
return GST_FLOW_OK;

View file

@ -97,7 +97,8 @@ GST_DEBUG_CATEGORY_STATIC (gst_segmentation_debug);
#define GST_CAT_DEFAULT gst_segmentation_debug
using namespace cv;
using namespace cv::bgsegm;
using namespace
cv::bgsegm;
/* Filter signals and args */
enum
@ -125,12 +126,17 @@ typedef enum
#define DEFAULT_LEARNING_RATE 0.01
#define GST_TYPE_SEGMENTATION_METHOD (gst_segmentation_method_get_type ())
static GType
static
GType
gst_segmentation_method_get_type (void)
{
static GType etype = 0;
static
GType
etype = 0;
if (etype == 0) {
static const GEnumValue values[] = {
static const
GEnumValue
values[] = {
{METHOD_BOOK, "Codebook-based segmentation (Bradski2008)", "codebook"},
{METHOD_MOG, "Mixture-of-Gaussians segmentation (Bowden2001)", "mog"},
{METHOD_MOG2, "Mixture-of-Gaussians segmentation (Zivkovic2004)", "mog2"},
@ -143,55 +149,79 @@ gst_segmentation_method_get_type (void)
G_DEFINE_TYPE (GstSegmentation, gst_segmentation, GST_TYPE_OPENCV_VIDEO_FILTER);
static GstStaticPadTemplate sink_factory = GST_STATIC_PAD_TEMPLATE ("sink",
static
GstStaticPadTemplate
sink_factory = GST_STATIC_PAD_TEMPLATE ("sink",
GST_PAD_SINK,
GST_PAD_ALWAYS,
GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("RGBA")));
static GstStaticPadTemplate src_factory = GST_STATIC_PAD_TEMPLATE ("src",
static
GstStaticPadTemplate
src_factory = GST_STATIC_PAD_TEMPLATE ("src",
GST_PAD_SRC,
GST_PAD_ALWAYS,
GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("RGBA")));
static void gst_segmentation_set_property (GObject * object, guint prop_id,
static void
gst_segmentation_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec);
static void gst_segmentation_get_property (GObject * object, guint prop_id,
static void
gst_segmentation_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static GstFlowReturn gst_segmentation_transform_ip (GstOpencvVideoFilter * filter,
static
GstFlowReturn
gst_segmentation_transform_ip (GstOpencvVideoFilter * filter,
GstBuffer * buffer, IplImage * img);
static gboolean gst_segmentation_stop (GstBaseTransform * basesrc);
static gboolean gst_segmentation_set_caps (GstOpencvVideoFilter * filter, gint in_width,
static
gboolean
gst_segmentation_stop (GstBaseTransform * basesrc);
static
gboolean
gst_segmentation_set_caps (GstOpencvVideoFilter * filter, gint in_width,
gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels);
static void gst_segmentation_release_all_pointers (GstSegmentation * filter);
static void
gst_segmentation_release_all_pointers (GstSegmentation * filter);
/* Codebook algorithm + connected components functions*/
static int update_codebook (unsigned char *p, codeBook * c,
static int
update_codebook (unsigned char *p, codeBook * c,
unsigned *cbBounds, int numChannels);
static int clear_stale_entries (codeBook * c);
static unsigned char background_diff (unsigned char *p, codeBook * c,
static int
clear_stale_entries (codeBook * c);
static unsigned char
background_diff (unsigned char *p, codeBook * c,
int numChannels, int *minMod, int *maxMod);
static void find_connected_components (IplImage * mask, int poly1_hull0,
static void
find_connected_components (IplImage * mask, int poly1_hull0,
float perimScale, CvMemStorage * mem_storage, CvSeq * contours);
/* MOG (Mixture-of-Gaussians functions */
static int initialise_mog (GstSegmentation * filter);
static int run_mog_iteration (GstSegmentation * filter);
static int run_mog2_iteration (GstSegmentation * filter);
static int finalise_mog (GstSegmentation * filter);
static int
initialise_mog (GstSegmentation * filter);
static int
run_mog_iteration (GstSegmentation * filter);
static int
run_mog2_iteration (GstSegmentation * filter);
static int
finalise_mog (GstSegmentation * filter);
/* initialize the segmentation's class */
static void
gst_segmentation_class_init (GstSegmentationClass * klass)
{
GObjectClass *gobject_class;
GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
GstBaseTransformClass *basesrc_class = GST_BASE_TRANSFORM_CLASS (klass);
GstOpencvVideoFilterClass *cvfilter_class =
(GstOpencvVideoFilterClass *) klass;
GObjectClass *
gobject_class;
GstElementClass *
element_class = GST_ELEMENT_CLASS (klass);
GstBaseTransformClass *
basesrc_class = GST_BASE_TRANSFORM_CLASS (klass);
GstOpencvVideoFilterClass *
cvfilter_class = (GstOpencvVideoFilterClass *) klass;
gobject_class = (GObjectClass *) klass;
@ -251,7 +281,8 @@ static void
gst_segmentation_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec)
{
GstSegmentation *filter = GST_SEGMENTATION (object);
GstSegmentation *
filter = GST_SEGMENTATION (object);
switch (prop_id) {
case PROP_METHOD:
@ -273,7 +304,8 @@ static void
gst_segmentation_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec)
{
GstSegmentation *filter = GST_SEGMENTATION (object);
GstSegmentation *
filter = GST_SEGMENTATION (object);
switch (prop_id) {
case PROP_METHOD:
@ -291,12 +323,14 @@ gst_segmentation_get_property (GObject * object, guint prop_id,
}
}
static gboolean
static
gboolean
gst_segmentation_set_caps (GstOpencvVideoFilter * filter, gint in_width,
gint in_height, gint in_depth, gint in_channels,
gint out_width, gint out_height, gint out_depth, gint out_channels)
{
GstSegmentation *segmentation = GST_SEGMENTATION (filter);
GstSegmentation *
segmentation = GST_SEGMENTATION (filter);
CvSize size;
size = cvSize (in_width, in_height);
@ -333,10 +367,12 @@ gst_segmentation_set_caps (GstOpencvVideoFilter * filter, gint in_width,
}
/* Clean up */
static gboolean
static
gboolean
gst_segmentation_stop (GstBaseTransform * basesrc)
{
GstSegmentation *filter = GST_SEGMENTATION (basesrc);
GstSegmentation *
filter = GST_SEGMENTATION (basesrc);
if (filter->cvRGB != NULL)
gst_segmentation_release_all_pointers (filter);
@ -360,12 +396,15 @@ gst_segmentation_release_all_pointers (GstSegmentation * filter)
finalise_mog (filter);
}
static GstFlowReturn
gst_segmentation_transform_ip (GstOpencvVideoFilter * cvfilter, GstBuffer * buffer,
IplImage * img)
static
GstFlowReturn
gst_segmentation_transform_ip (GstOpencvVideoFilter * cvfilter,
GstBuffer * buffer, IplImage * img)
{
GstSegmentation *filter = GST_SEGMENTATION (cvfilter);
int j;
GstSegmentation *
filter = GST_SEGMENTATION (cvfilter);
int
j;
filter->framecount++;
@ -381,9 +420,12 @@ gst_segmentation_transform_ip (GstOpencvVideoFilter * cvfilter, GstBuffer * buff
* [2] "Real-time Foreground-Background Segmentation using Codebook Model",
* Real-time Imaging, Volume 11, Issue 3, Pages 167-256, June 2005. */
if (METHOD_BOOK == filter->method) {
unsigned cbBounds[3] = { 10, 5, 5 };
int minMod[3] = { 20, 20, 20 }, maxMod[3] = {
20, 20, 20};
unsigned
cbBounds[3] = { 10, 5, 5 };
int
minMod[3] = { 20, 20, 20 }, maxMod[3] = {
20, 20, 20
};
if (filter->framecount < 30) {
/* Learning background phase: update_codebook on every frame */
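
/* A minimal sketch of the two-phase codebook flow configured above: the
 * first 30 frames only train each pixel's codebook, after which pixels
 * are classified against it.  The two prototypes below appear in this
 * file; the driver loop, buffer layout and names around them are
 * illustrative only. */
typedef struct codeBook codeBook;       /* real definition lives elsewhere */
int update_codebook (unsigned char *p, codeBook * c, unsigned *cbBounds,
    int numChannels);
unsigned char background_diff (unsigned char *p, codeBook * c,
    int numChannels, int *minMod, int *maxMod);

static void
segment_pixels (unsigned char *yuv, codeBook * books, int npixels,
    int framecount, unsigned char *fg_mask)
{
  unsigned cbBounds[3] = { 10, 5, 5 };  /* learning box growth per channel */
  int minMod[3] = { 20, 20, 20 };       /* detection slack below the box */
  int maxMod[3] = { 20, 20, 20 };       /* detection slack above the box */

  for (int j = 0; j < npixels; j++) {
    unsigned char *p = yuv + 3 * j;     /* assumes packed 3-channel data */
    if (framecount < 30)
      update_codebook (p, &books[j], cbBounds, 3);      /* learning phase */
    else
      fg_mask[j] = background_diff (p, &books[j], 3, minMod, maxMod);
  }
}
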
@ -498,9 +540,14 @@ update_codebook (unsigned char *p, codeBook * c, unsigned *cbBounds,
int numChannels)
{
/* c->t+=1; */
unsigned int high[3], low[3];
int n, i;
int matchChannel;
unsigned int
high[3],
low[3];
int
n,
i;
int
matchChannel;
for (n = 0; n < numChannels; n++) {
high[n] = p[n] + cbBounds[n];
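
/* Viewed on its own, the loop entered here builds a per-channel tolerance
 * box around the incoming pixel.  The same computation as a standalone
 * helper; the zero clamp is added for clarity and is not in the original
 * unsigned arithmetic. */
static void
learning_box (const unsigned char *p, const unsigned *cbBounds,
    int numChannels, unsigned *low, unsigned *high)
{
  for (int n = 0; n < numChannels; n++) {
    high[n] = p[n] + cbBounds[n];       /* upper edge of the box */
    low[n] = (p[n] > cbBounds[n]) ? p[n] - cbBounds[n] : 0;     /* lower edge */
  }
}
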
@ -539,13 +586,15 @@ update_codebook (unsigned char *p, codeBook * c, unsigned *cbBounds,
/* OVERHEAD TO TRACK POTENTIAL STALE ENTRIES */
for (int s = 0; s < c->numEntries; s++) {
/* Track which codebook entries are going stale: */
int negRun = c->t - c->cb[s]->t_last_update;
int
negRun = c->t - c->cb[s]->t_last_update;
if (c->cb[s]->stale < negRun)
c->cb[s]->stale = negRun;
}
/* ENTER A NEW CODEWORD IF NEEDED */
if (i == c->numEntries) { /* if no existing codeword found, make one */
code_element **foo =
code_element **
foo =
(code_element **) g_malloc (sizeof (code_element *) *
(c->numEntries + 1));
for (int ii = 0; ii < c->numEntries; ii++) {
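
/* When no codeword matches, the array of codeword pointers grows by one
 * with the allocate-copy-free pattern shown above (g_renew would be the
 * GLib shortcut).  A self-contained version; the struct body is a
 * placeholder, not the element's real code_element. */
#include <glib.h>

typedef struct
{
  int t_last_update;            /* placeholder fields only */
  int stale;
} code_element;

static code_element **
codebook_append (code_element ** cb, int numEntries, code_element * fresh)
{
  code_element **grown = g_new (code_element *, numEntries + 1);

  for (int i = 0; i < numEntries; i++)
    grown[i] = cb[i];           /* keep the existing codewords */
  grown[numEntries] = fresh;    /* append the new one */
  g_free (cb);
  return grown;
}
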
@ -592,12 +641,18 @@ update_codebook (unsigned char *p, codeBook * c, unsigned *cbBounds,
int
clear_stale_entries (codeBook * c)
{
int staleThresh = c->t >> 1;
int *keep = (int *) g_malloc (sizeof (int) * (c->numEntries));
int keepCnt = 0;
code_element **foo;
int k;
int numCleared;
int
staleThresh = c->t >> 1;
int *
keep = (int *) g_malloc (sizeof (int) * (c->numEntries));
int
keepCnt = 0;
code_element **
foo;
int
k;
int
numCleared;
/* SEE WHICH CODEBOOK ENTRIES ARE TOO STALE */
for (int i = 0; i < c->numEntries; i++) {
if (c->cb[i]->stale > staleThresh)
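
/* The staleness rule these locals set up: with staleThresh at half the
 * frames seen so far (c->t >> 1), a codeword survives only if it was
 * refreshed during the most recent half of the sequence.  Illustrative
 * restatement: */
static int
keep_codeword (int frames_seen, int stale)
{
  int staleThresh = frames_seen >> 1;   /* half the observation window */
  return stale <= staleThresh;          /* 1 = keep, 0 = clear as stale */
}
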
@ -657,9 +712,11 @@ unsigned char
background_diff (unsigned char *p, codeBook * c, int numChannels,
int *minMod, int *maxMod)
{
int matchChannel;
int
matchChannel;
/* SEE IF THIS FITS AN EXISTING CODEWORD */
int i;
int
i;
for (i = 0; i < c->numEntries; i++) {
matchChannel = 0;
for (int n = 0; n < numChannels; n++) {
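
/* The loop opened here is the per-channel box test from the codebook
 * papers: a pixel matches a codeword when every channel lies inside the
 * learned min/max range widened by minMod/maxMod.  Field names below
 * follow the papers and may differ from the element's real struct. */
typedef struct
{
  unsigned char min[3];         /* learned per-channel minimum */
  unsigned char max[3];         /* learned per-channel maximum */
} codeword_box;

static int
pixel_matches (const unsigned char *p, const codeword_box * cw,
    int numChannels, const int *minMod, const int *maxMod)
{
  for (int n = 0; n < numChannels; n++) {
    if (p[n] < cw->min[n] - minMod[n] || p[n] > cw->max[n] + maxMod[n])
      return 0;                 /* one channel outside the box: no match */
  }
  return 1;
}
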
@ -715,11 +772,17 @@ find_connected_components (IplImage * mask, int poly1_hull0, float perimScale,
CvMemStorage * mem_storage, CvSeq * contours)
{
CvContourScanner scanner;
CvSeq *c;
int numCont = 0;
CvSeq *
c;
int
numCont = 0;
/* Just some convenience variables */
const CvScalar CVX_WHITE = CV_RGB (0xff, 0xff, 0xff);
const CvScalar CVX_BLACK = CV_RGB (0x00, 0x00, 0x00);
const
CvScalar
CVX_WHITE = CV_RGB (0xff, 0xff, 0xff);
const
CvScalar
CVX_BLACK = CV_RGB (0x00, 0x00, 0x00);
/* CLEAN UP RAW MASK */
cvMorphologyEx (mask, mask, 0, 0, CV_MOP_OPEN, CVCLOSE_ITR);
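
/* The same cleanup in the C++ API, for reference: an opening removes
 * speckle noise and a closing fills small holes.  CVCLOSE_ITR above
 * suggests a matching close pass with the same iteration count; this
 * sketch is not a drop-in replacement for the legacy call. */
#include <opencv2/imgproc.hpp>

static void
clean_mask (cv::Mat & mask, int iters)
{
  cv::morphologyEx (mask, mask, cv::MORPH_OPEN, cv::Mat (),
      cv::Point (-1, -1), iters);       /* drop speckle noise */
  cv::morphologyEx (mask, mask, cv::MORPH_CLOSE, cv::Mat (),
      cv::Point (-1, -1), iters);       /* fill small holes */
}
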
@ -735,15 +798,18 @@ find_connected_components (IplImage * mask, int poly1_hull0, float perimScale,
CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint (0, 0));
while ((c = cvFindNextContour (scanner)) != NULL) {
double len = cvContourArea (c, CV_WHOLE_SEQ, 0);
double
len = cvContourArea (c, CV_WHOLE_SEQ, 0);
/* calculate perimeter len threshold: */
double q = (mask->height + mask->width) / perimScale;
double
q = (mask->height + mask->width) / perimScale;
/* Get rid of blob if its perimeter is too small: */
if (len < q) {
cvSubstituteContour (scanner, NULL);
} else {
/* Smooth its edges if it's large enough */
CvSeq *c_new;
CvSeq *
c_new;
if (poly1_hull0) {
/* Polygonal approximation */
c_new =
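
/* Worth flagging: len is computed with cvContourArea even though the
 * comment and threshold treat it as a perimeter length, a quirk carried
 * over from the OpenCV book sample.  The size gate itself, standalone
 * and illustrative: */
static int
blob_survives (double len, int mask_width, int mask_height, float perimScale)
{
  /* threshold grows with frame size, shrinks as perimScale grows */
  double q = (mask_height + mask_width) / perimScale;
  return len >= q;
}
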
@ -772,8 +838,9 @@ find_connected_components (IplImage * mask, int poly1_hull0, float perimScale,
int
initialise_mog (GstSegmentation * filter)
{
filter->img_input_as_cvMat = (void *) new Mat (cvarrToMat (filter->cvYUV, false));
filter->img_fg_as_cvMat = (void *) new Mat (cvarrToMat(filter->cvFG, false));
filter->img_input_as_cvMat = (void *) new
Mat (cvarrToMat (filter->cvYUV, false));
filter->img_fg_as_cvMat = (void *) new Mat (cvarrToMat (filter->cvFG, false));
filter->mog = bgsegm::createBackgroundSubtractorMOG ();
filter->mog2 = createBackgroundSubtractorMOG2 ();
@ -799,9 +866,8 @@ run_mog_iteration (GstSegmentation * filter)
European Workshop on Advanced Video-Based Surveillance Systems, 2001
*/
filter->mog->apply (*((Mat *) filter->
img_input_as_cvMat), *((Mat *) filter->img_fg_as_cvMat),
filter->learning_rate);
filter->mog->apply (*((Mat *) filter->img_input_as_cvMat),
*((Mat *) filter->img_fg_as_cvMat), filter->learning_rate);
return (0);
}
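
/* run_mog_iteration reduces to one BackgroundSubtractor::apply call on
 * Mat headers wrapping the element's buffers.  The same step against the
 * opencv_contrib bgsegm API used above; a sketch, not the element's
 * code. */
#include <opencv2/bgsegm.hpp>

static void
mog_step (cv::Ptr<cv::bgsegm::BackgroundSubtractorMOG> & mog,
    const cv::Mat & input, cv::Mat & fg_mask, double learning_rate)
{
  if (mog.empty ())
    mog = cv::bgsegm::createBackgroundSubtractorMOG ();  /* default params */
  mog->apply (input, fg_mask, learning_rate);   /* update model, classify */
}
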
@ -810,9 +876,8 @@ int
run_mog2_iteration (GstSegmentation * filter)
{
((Mat *) filter->img_input_as_cvMat)->data =
(uchar *) filter->cvYUV->imageData;
((Mat *) filter->img_fg_as_cvMat)->data =
(uchar *) filter->cvFG->imageData;
(uchar *) filter->cvYUV->imageData;
((Mat *) filter->img_fg_as_cvMat)->data = (uchar *) filter->cvFG->imageData;
/*
BackgroundSubtractorMOG2 [1], Gaussian Mixture-based Background/Foreground
@ -827,9 +892,8 @@ run_mog2_iteration (GstSegmentation * filter)
Letters, vol. 27, no. 7, pages 773-780, 2006.
*/
filter->mog2->apply (*((Mat *) filter->
img_input_as_cvMat), *((Mat *) filter->img_fg_as_cvMat),
filter->learning_rate);
filter->mog2->apply (*((Mat *) filter->img_input_as_cvMat),
*((Mat *) filter->img_fg_as_cvMat), filter->learning_rate);
return (0);
}
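
/* The MOG2 path has the same shape; createBackgroundSubtractorMOG2 lives
 * in core OpenCV (opencv2/video.hpp) rather than contrib, and by default
 * it also marks shadows in the foreground mask.  Sketch only. */
#include <opencv2/video.hpp>

static void
mog2_step (cv::Ptr<cv::BackgroundSubtractorMOG2> & mog2,
    const cv::Mat & input, cv::Mat & fg_mask, double learning_rate)
{
  if (mog2.empty ())
    mog2 = cv::createBackgroundSubtractorMOG2 ();  /* default history/varThreshold */
  mog2->apply (input, fg_mask, learning_rate);
}
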

View file

@ -92,7 +92,7 @@ gst_opencv_parse_iplimage_params_from_caps (GstCaps * caps, gint * width,
}
return gst_opencv_iplimage_params_from_video_info (&info, width, height,
ipldepth, channels, err);
ipldepth, channels, err);
}
gboolean
@ -118,7 +118,7 @@ gst_opencv_iplimage_params_from_video_info (GstVideoInfo * info, gint * width,
gboolean
gst_opencv_cv_image_type_from_video_format (GstVideoFormat format,
int * cv_type, GError ** err)
int *cv_type, GError ** err)
{
const gchar *format_str;
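
/* A possible call pattern for the helper whose prototype is fixed above;
 * the function name and signature come from this hunk, everything else
 * here is illustrative. */
static gboolean
pick_cv_type (GstVideoFormat format)
{
  int cv_type;
  GError *err = NULL;

  if (!gst_opencv_cv_image_type_from_video_format (format, &cv_type, &err)) {
    GST_ERROR ("unsupported video format: %s", err->message);
    g_clear_error (&err);
    return FALSE;
  }
  /* cv_type now holds a CV_8UC1 / CV_8UC3 style constant */
  return TRUE;
}
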

View file

@ -76,10 +76,10 @@ static void gst_opencv_video_filter_set_property (GObject * object,
static void gst_opencv_video_filter_get_property (GObject * object,
guint prop_id, GValue * value, GParamSpec * pspec);
static GstFlowReturn gst_opencv_video_filter_transform_frame (GstVideoFilter * trans,
GstVideoFrame * inframe, GstVideoFrame * outframe);
static GstFlowReturn gst_opencv_video_filter_transform_frame_ip (GstVideoFilter * trans,
GstVideoFrame * frame);
static GstFlowReturn gst_opencv_video_filter_transform_frame (GstVideoFilter *
trans, GstVideoFrame * inframe, GstVideoFrame * outframe);
static GstFlowReturn gst_opencv_video_filter_transform_frame_ip (GstVideoFilter
* trans, GstVideoFrame * frame);
static gboolean gst_opencv_video_filter_set_info (GstVideoFilter * trans,
GstCaps * incaps, GstVideoInfo * in_info, GstCaps * outcaps,
@ -117,7 +117,8 @@ gst_opencv_video_filter_class_init (GstOpencvVideoFilterClass * klass)
gobject_class->get_property = gst_opencv_video_filter_get_property;
vfilter_class->transform_frame = gst_opencv_video_filter_transform_frame;
vfilter_class->transform_frame_ip = gst_opencv_video_filter_transform_frame_ip;
vfilter_class->transform_frame_ip =
gst_opencv_video_filter_transform_frame_ip;
vfilter_class->set_info = gst_opencv_video_filter_set_info;
}
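
/* For context, elements consume this base class by filling its function
 * hooks in their own class_init; the vmethods wired up above then call
 * those hooks with ready-made IplImage headers.  The MyFilter names are
 * placeholders, and cv_trans_ip_func is recalled from
 * gstopencvvideofilter.h rather than shown in this hunk. */
static GstFlowReturn my_filter_transform_ip (GstOpencvVideoFilter * filter,
    GstBuffer * buffer, IplImage * img);

static void
my_filter_class_init (MyFilterClass * klass)
{
  GstOpencvVideoFilterClass *cvfilter_class =
      (GstOpencvVideoFilterClass *) klass;

  /* in-place variant: one IplImage mapped over the incoming buffer */
  cvfilter_class->cv_trans_ip_func = my_filter_transform_ip;
}
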
@ -127,8 +128,8 @@ gst_opencv_video_filter_init (GstOpencvVideoFilter * transform)
}
static GstFlowReturn
gst_opencv_video_filter_transform_frame (GstVideoFilter *trans,
GstVideoFrame *inframe, GstVideoFrame *outframe)
gst_opencv_video_filter_transform_frame (GstVideoFilter * trans,
GstVideoFrame * inframe, GstVideoFrame * outframe)
{
GstOpencvVideoFilter *transform;
GstOpencvVideoFilterClass *fclass;
@ -180,7 +181,7 @@ gst_opencv_video_filter_transform_frame_ip (GstVideoFilter * trans,
static gboolean
gst_opencv_video_filter_set_info (GstVideoFilter * trans, GstCaps * incaps,
GstVideoInfo * in_info, GstCaps * outcaps, GstVideoInfo * out_info)
GstVideoInfo * in_info, GstCaps * outcaps, GstVideoInfo * out_info)
{
GstOpencvVideoFilter *transform = GST_OPENCV_VIDEO_FILTER (trans);
GstOpencvVideoFilterClass *klass =