use of org.bytedeco.javacpp.opencv_core.CvPoint2D32f in project myrobotlab by MyRobotLab.
the class OpenCVFilterAffine method process.
@Override
public IplImage process(IplImage image, OpenCVData data) throws InterruptedException {
  // TODO : Create the affine filter and return the new image
  // Find the center of the image
  CvPoint2D32f center = cvPoint2D32f(image.width() / 2.0F, image.height() / 2.0F);
  // TODO: test this...
  // CvBox2D box = new CvBox2D(center, cvSize2D32f(image.width() - 1, image.height() - 1), angle);
  CvBox2D box = new CvBox2D();
  box.center(center);
  box.size(cvSize2D32f(image.width() - 1, image.height() - 1));
  box.angle(angle);
  CvPoint2D32f points = new CvPoint2D32f(4);
  cvBoxPoints(box, points);
  // CvMat pointMat = cvCreateMat(1, 4, CV_32FC2);
  // pointMat.put(0, 0, 0, points.position(0).x());
  // pointMat.put(0, 0, 1, points.position(0).y());
  // pointMat.put(0, 1, 0, points.position(1).x());
  // pointMat.put(0, 1, 1, points.position(1).y());
  // pointMat.put(0, 2, 0, points.position(2).x());
  // pointMat.put(0, 2, 1, points.position(2).y());
  // pointMat.put(0, 3, 0, points.position(3).x());
  // pointMat.put(0, 3, 1, points.position(3).y());
  // CvRect boundingRect = cvBoundingRect(pointMat, 0);
  // CvMat dst = cvCreateMat(boundingRect.height(), boundingRect.width(), image.type());
  // CvMat dst = cvCreateMat(boundingRect.height(), boundingRect.width(), CV_32FC1);
  CvMat rotMat = cvCreateMat(2, 3, CV_32FC1);
  cv2DRotationMatrix(center, angle, 1, rotMat);
  // Add the x translation to the rotation matrix
  double x = rotMat.get(0, 2) + dx;
  rotMat.put(0, 2, x);
  // Add the y translation to the rotation matrix
  double y = rotMat.get(1, 2) + dy;
  rotMat.put(1, 2, y);
  // double y_1 = ((boundingRect.width() - image.width()) / 2.0F) + rotMat.get(0, 2);
  // double y_2 = ((boundingRect.height() - image.height()) / 2.0F + rotMat.get(1, 2));
  // rotMat.put(0, 2, y_1);
  // rotMat.put(1, 2, y_2);
  // CvScalar fillval = cvScalarAll(0);
  // IplImage dst_frame = cvCloneImage(image);
  // cvWarpAffine(image, dst_frame, rotMat);
  // System.out.println(rotMat);
  // cvWarpAffine does not support in-place operation, so warp into a copy of the frame
  IplImage warped = cvCloneImage(image);
  cvWarpAffine(image, warped, rotMat);
  return warped;
}
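For reference, a minimal standalone sketch of the same rotate-then-translate idea, kept separate from the filter; the helper name rotateAbout, the static imports, and the cloned destination image are assumptions for illustration, not project code.

import static org.bytedeco.javacpp.opencv_core.*;
import static org.bytedeco.javacpp.opencv_imgproc.*;

public class AffineSketch {
  // Rotate src by angleDegrees about its center, then shift it by (dx, dy).
  public static IplImage rotateAbout(IplImage src, double angleDegrees, double dx, double dy) {
    CvPoint2D32f center = cvPoint2D32f(src.width() / 2.0, src.height() / 2.0);
    CvMat rot = cvCreateMat(2, 3, CV_32FC1);
    cv2DRotationMatrix(center, angleDegrees, 1.0, rot);
    // fold the translation into the last column of the 2x3 affine matrix
    rot.put(0, 2, rot.get(0, 2) + dx);
    rot.put(1, 2, rot.get(1, 2) + dy);
    IplImage dst = cvCloneImage(src);
    cvWarpAffine(src, dst, rot);
    return dst;
  }
}

Folding dx and dy into the last column is what lets a single cvWarpAffine call apply both the rotation and the shift.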
use of org.bytedeco.javacpp.opencv_core.CvPoint2D32f in project myrobotlab by MyRobotLab.
the class OpenCVFilterGoodFeaturesToTrack method process.
@Override
public IplImage process(IplImage image, VisionData data) {
  if (channels == 3) {
    grey = IplImage.create(imageSize, 8, 1);
    cvCvtColor(image, grey, CV_BGR2GRAY);
  } else {
    grey = image;
  }
  if (lastMaxPointCount != maxPointCount) {
    cornerCount.setValue(maxPointCount);
    count = new int[] { maxPointCount };
    lastMaxPointCount = maxPointCount;
  }
  // FIXME copy dont create ?
  corners = new CvPoint2D32f(maxPointCount);
  ++totalIterations;
  IntPointer countPointer = new IntPointer(count);
  cvGoodFeaturesToTrack(grey, eig, temp, corners, countPointer, qualityLevel, minDistance, mask, blockSize, useHarris, k);
  // OpenCV writes the number of corners it found into the native IntPointer,
  // not into the Java array, so copy it back before using count[0]
  count[0] = countPointer.get(0);
  // FIXME - another sad data conversion :(
  ArrayList<Point2Df> points = new ArrayList<Point2Df>();
  Float value = null;
  int x, y;
  for (int i = 0; i < count[0]; ++i) {
    corners.position(i);
    x = (int) corners.x();
    y = (int) corners.y();
    // da[i] = da[i] / frame.width();
    // da[i + 1] = da[i + 1] / frame.height();
    // x = Math.round(a)
    String key = String.format("%d.%d", x, y);
    if (values.containsKey(key)) {
      value = values.get(key);
      ++value;
      values.put(key, value);
      // log.warn(value);
      // log.warn(values.get(key));
    } else {
      value = new Float(1f);
      values.put(key, value);
    }
    Point2Df np = null;
    if (useFloatValues) {
      np = new Point2Df((float) x / width, (float) y / height, value);
    } else {
      np = new Point2Df(x, y, value);
    }
    if (np.value > oldest.value) {
      oldest = np;
    }
    points.add(np);
  }
  data.set(points);
  return image;
}
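The call above leans on filter fields (grey, eig, temp, mask, and the threshold parameters) that are initialized elsewhere in the class. A self-contained sketch of the same cvGoodFeaturesToTrack pattern, with the buffer sizes and the 0.01 / 10.0 / 3 / 0.04 parameter values chosen purely for illustration:

import static org.bytedeco.javacpp.opencv_core.*;
import static org.bytedeco.javacpp.opencv_imgproc.*;
import org.bytedeco.javacpp.IntPointer;

public class GoodFeaturesSketch {
  // Detect up to maxPoints corners in a single-channel image and print them.
  public static void findCorners(IplImage grey, int maxPoints) {
    IplImage eig = IplImage.create(cvGetSize(grey), IPL_DEPTH_32F, 1);
    IplImage temp = IplImage.create(cvGetSize(grey), IPL_DEPTH_32F, 1);
    CvPoint2D32f corners = new CvPoint2D32f(maxPoints);
    IntPointer cornerCount = new IntPointer(new int[] { maxPoints });
    cvGoodFeaturesToTrack(grey, eig, temp, corners, cornerCount,
        0.01, 10.0, null, 3, 0, 0.04);
    // cornerCount now holds the number of corners actually written into the buffer
    for (int i = 0; i < cornerCount.get(0); i++) {
      corners.position(i);
      System.out.println(corners.x() + ", " + corners.y());
    }
  }
}

corners is a single CvPoint2D32f backed by an array of maxPoints structs; position(i) moves the pointer so that x() and y() read the i-th corner, which is why the filter above also resets position inside its loop.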
use of org.bytedeco.javacpp.opencv_core.CvPoint2D32f in project myrobotlab by MyRobotLab.
the class OpenCVFilterGoodFeaturesToTrack method process.
@Override
public IplImage process(IplImage image, OpenCVData data) {
  if (channels == 3) {
    grey = IplImage.create(imageSize, 8, 1);
    cvCvtColor(image, grey, CV_BGR2GRAY);
  } else {
    grey = image;
  }
  if (lastMaxPointCount != maxPointCount) {
    cornerCount.setValue(maxPointCount);
    count = new int[] { maxPointCount };
    lastMaxPointCount = maxPointCount;
  }
  // FIXME copy dont create ?
  corners = new CvPoint2D32f(maxPointCount);
  ++totalIterations;
  IntPointer countPointer = new IntPointer(count);
  cvGoodFeaturesToTrack(grey, eig, temp, corners, countPointer, qualityLevel, minDistance, mask, blockSize, useHarris, k);
  // OpenCV writes the number of corners it found into the native IntPointer,
  // not into the Java array, so copy it back before using count[0]
  count[0] = countPointer.get(0);
  // FIXME - another sad data conversion :(
  ArrayList<Point2Df> points = new ArrayList<Point2Df>();
  Float value = null;
  int x, y;
  for (int i = 0; i < count[0]; ++i) {
    corners.position(i);
    x = (int) corners.x();
    y = (int) corners.y();
    // da[i] = da[i] / frame.width();
    // da[i + 1] = da[i + 1] / frame.height();
    // x = Math.round(a)
    String key = String.format("%d.%d", x, y);
    if (values.containsKey(key)) {
      value = values.get(key);
      ++value;
      values.put(key, value);
      // log.warn(value);
      // log.warn(values.get(key));
    } else {
      value = new Float(1f);
      values.put(key, value);
    }
    Point2Df np = null;
    if (useFloatValues) {
      np = new Point2Df((float) x / width, (float) y / height, value);
    } else {
      np = new Point2Df(x, y, value);
    }
    if (np.value > oldest.value) {
      oldest = np;
    }
    points.add(np);
  }
  data.set(points);
  return image;
}
use of org.bytedeco.javacpp.opencv_core.CvPoint2D32f in project myrobotlab by MyRobotLab.
the class OpenCVFilterLKOpticalTrack method imageChanged.
@Override
public void imageChanged(IplImage image) {
  eig = IplImage.create(imageSize, IPL_DEPTH_32F, 1);
  tmp = IplImage.create(imageSize, IPL_DEPTH_32F, 1);
  imgB = IplImage.create(imageSize, 8, 1);
  imgA = IplImage.create(imageSize, 8, 1);
  if (channels == 3) {
    cvCvtColor(image, imgB, CV_BGR2GRAY);
    cvCopy(imgB, imgA);
  }
  cornersA = new CvPoint2D32f(maxPointCount);
  cornersB = new CvPoint2D32f(maxPointCount);
  cornersC = new CvPoint2D32f(maxPointCount);
  // per-point status and error buffers for the Lucas-Kanade call
  features_found = new BytePointer(maxPointCount);
  feature_errors = new FloatPointer(maxPointCount);
}
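The buffers above are sized for the tracker call itself, which this snippet does not include. A hedged sketch of how they are typically consumed, assuming the cvCalcOpticalFlowPyrLK binding from the opencv_video presets and the conventional (width + 8, height / 3) pyramid scratch images; the method name track and its parameter values are illustrative only:

import static org.bytedeco.javacpp.opencv_core.*;
import static org.bytedeco.javacpp.opencv_video.*;
import org.bytedeco.javacpp.BytePointer;
import org.bytedeco.javacpp.FloatPointer;

public class LKSketch {
  // Track pointCount corners from the previous grey frame imgA into the current grey frame imgB.
  public static void track(IplImage imgA, IplImage imgB, CvPoint2D32f cornersA,
      CvPoint2D32f cornersB, int pointCount) {
    CvSize pyrSize = cvSize(imgA.width() + 8, imgA.height() / 3);
    IplImage pyrA = IplImage.create(pyrSize, IPL_DEPTH_32F, 1);
    IplImage pyrB = IplImage.create(pyrSize, IPL_DEPTH_32F, 1);
    BytePointer featuresFound = new BytePointer(pointCount);
    FloatPointer featureErrors = new FloatPointer(pointCount);
    cvCalcOpticalFlowPyrLK(imgA, imgB, pyrA, pyrB, cornersA, cornersB, pointCount,
        cvSize(15, 15), 5, featuresFound, featureErrors,
        cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3), 0);
    for (int i = 0; i < pointCount; i++) {
      if (featuresFound.get(i) == 0) {
        continue; // this corner could not be tracked into imgB
      }
      cornersB.position(i);
      System.out.println("tracked to " + cornersB.x() + ", " + cornersB.y());
    }
  }
}

features_found and feature_errors play the same role here as the per-point status and error buffers allocated in imageChanged: one entry per corner, written by the tracker.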