Usage example of org.ddogleg.struct.GrowQueue_I32 from the BoofCV project (by lessthanoptimal): class TestReidSolomonCodes, method correctErrors_hand.
/**
 * Compare against a hand computed scenario
 */
@Test
public void correctErrors_hand() {
    int nsyn = 10;

    // Encode a known message and generate its ECC bytes
    GrowQueue_I8 message = GrowQueue_I8.parseHex("[ 0x40, 0xd2, 0x75, 0x47, 0x76, 0x17, 0x32, 0x06, 0x27, 0x26, 0x96, 0xc6, 0xc6, 0x96, 0x70, 0xec ]");
    GrowQueue_I8 ecc = new GrowQueue_I8();
    ReidSolomonCodes alg = new ReidSolomonCodes(8, primitive8);
    alg.generator(nsyn);
    alg.computeECC(message, ecc);

    // Corrupt the message at three hand picked locations
    GrowQueue_I8 corrupted = message.copy();
    corrupted.data[0] = 0;
    corrupted.data[4] = 8;
    corrupted.data[5] = 9;

    // Compute the syndromes and error locator polynomial from the damaged data
    GrowQueue_I8 syndromes = new GrowQueue_I8();
    GrowQueue_I8 errorLocator = new GrowQueue_I8();
    alg.computeSyndromes(corrupted, ecc, syndromes);
    alg.findErrorLocatorPolynomialBM(syndromes, errorLocator);

    // Tell the algorithm exactly where the corrupted bytes are
    GrowQueue_I32 errorLocations = new GrowQueue_I32(3);
    errorLocations.add(0);
    errorLocations.add(4);
    errorLocations.add(5);

    alg.correctErrors(corrupted, message.size + ecc.size, syndromes, errorLocator, errorLocations);

    // After correction the data should exactly match the original message
    assertEquals(message.size, corrupted.size);
    for (int i = 0; i < message.size; i++) {
        assertEquals(message.get(i), corrupted.get(i));
    }
}
Usage example of org.ddogleg.struct.GrowQueue_I32 from the BoofCV project (by lessthanoptimal): class ExampleMultiviewSceneReconstruction, method estimateMotionPnP.
/**
 * Estimate the motion between two images. Image A is assumed to have known features with 3D coordinates already
 * and image B is an unprocessed image with no 3D features yet.
 *
 * On success, image B's world-to-camera motion is stored in {@code motionWorldToCamera[imageB]} and
 * new/updated 3D features are added to image B's feature list (and to A's for newly triangulated ones).
 * Returns early without estimating anything if fewer than 15 PnP correspondences are found.
 *
 * @param imageA index of the image whose features already have 3D world coordinates
 * @param imageB index of the unprocessed image whose pose is estimated here
 */
private void estimateMotionPnP(int imageA, int imageB) {
// Mark image B as processed so that it isn't processed a second time.
processedImage[imageB] = true;
System.out.println("Estimating PnP motion between " + imageA + " and " + imageB);
// initially prune features using essential matrix
// (the relative pose output is discarded here; only the inlier association list is kept)
Se3_F64 dummy = new Se3_F64();
List<AssociatedIndex> inliers = new ArrayList<>();
if (!estimateStereoPose(imageA, imageB, dummy, inliers))
throw new RuntimeException("The first image pair is a bad keyframe!");
FastQueue<Point2D_F64> pixelsA = imagePixels.get(imageA);
FastQueue<Point2D_F64> pixelsB = imagePixels.get(imageB);
List<Feature3D> featuresA = imageFeature3D.get(imageA);
// this should be empty
List<Feature3D> featuresB = imageFeature3D.get(imageB);
// create the associated pair for motion estimation
// Split the essential-matrix inliers into those with a known 3D feature in A (usable for PnP)
// and those without one (candidates for triangulating brand new features later on).
List<Point2D3D> features = new ArrayList<>();
List<AssociatedIndex> inputRansac = new ArrayList<>();
List<AssociatedIndex> unmatched = new ArrayList<>();
for (int i = 0; i < inliers.size(); i++) {
AssociatedIndex a = inliers.get(i);
Feature3D t = lookupFeature(featuresA, imageA, pixelsA.get(a.src));
if (t != null) {
Point2D_F64 p = pixelsB.get(a.dst);
// pair B's observed pixel with the feature's known world coordinate
features.add(new Point2D3D(p, t.worldPt));
// kept in lockstep with 'features' so PnP input indices map back to associations
inputRansac.add(a);
} else {
unmatched.add(a);
}
}
// make sure there are enough features to estimate motion
if (features.size() < 15) {
System.out.println(" Too few features for PnP!! " + features.size());
return;
}
// estimate the motion between the two images
if (!estimatePnP.process(features))
throw new RuntimeException("Motion estimation failed");
// refine the motion estimate using non-linear optimization
Se3_F64 motionWorldToB = new Se3_F64();
if (!refinePnP.fitModel(estimatePnP.getMatchSet(), estimatePnP.getModelParameters(), motionWorldToB))
throw new RuntimeException("Refine failed!?!?");
motionWorldToCamera[imageB].set(motionWorldToB);
estimatedImage[imageB] = true;
// Add all tracks in the inlier list to the B's list of 3D features
int N = estimatePnP.getMatchSet().size();
// marks which entries of 'features'/'inputRansac' were accepted as PnP inliers
boolean[] inlierPnP = new boolean[features.size()];
for (int i = 0; i < N; i++) {
int index = estimatePnP.getInputIndex(i);
AssociatedIndex a = inputRansac.get(index);
// find the track that this was associated with and add it to B
Feature3D t = lookupFeature(featuresA, imageA, pixelsA.get(a.src));
featuresB.add(t);
t.frame.add(imageB);
t.obs.grow().set(pixelsB.get(a.dst));
inlierPnP[index] = true;
}
// Create new tracks for all features which were a member of essential matrix but not used to estimate
// the motion using PnP.
Se3_F64 motionBtoWorld = motionWorldToB.invert(null);
Se3_F64 motionWorldToA = motionWorldToCamera[imageA];
Se3_F64 motionBtoA = motionBtoWorld.concat(motionWorldToA, null);
Point3D_F64 pt_in_b = new Point3D_F64();
int totalAdded = 0;
GrowQueue_I32 colorsA = imageColors.get(imageA);
for (AssociatedIndex a : unmatched) {
if (!triangulate.triangulate(pixelsB.get(a.dst), pixelsA.get(a.src), motionBtoA, pt_in_b))
continue;
// the feature has to be in front of the camera
if (pt_in_b.z > 0) {
Feature3D t = new Feature3D();
// transform from B back to world frame
SePointOps_F64.transform(motionBtoWorld, pt_in_b, t.worldPt);
t.color = colorsA.get(a.src);
t.obs.grow().set(pixelsA.get(a.src));
t.obs.grow().set(pixelsB.get(a.dst));
t.frame.add(imageA);
t.frame.add(imageB);
featuresAll.add(t);
featuresA.add(t);
featuresB.add(t);
totalAdded++;
}
}
// NOTE(review): the original comment here appears truncated ("out better if the 3D coordinate is
// re-triangulated as a new feature"). Associations which had a 3D feature in A but were rejected
// as PnP inliers are re-triangulated below and added as new features to image B only.
for (int i = 0; i < features.size(); i++) {
if (inlierPnP[i])
continue;
AssociatedIndex a = inputRansac.get(i);
if (!triangulate.triangulate(pixelsB.get(a.dst), pixelsA.get(a.src), motionBtoA, pt_in_b))
continue;
// the feature has to be in front of the camera
if (pt_in_b.z > 0) {
Feature3D t = new Feature3D();
// transform from B back to world frame
SePointOps_F64.transform(motionBtoWorld, pt_in_b, t.worldPt);
// only add this feature to image B since a similar one already exists in A.
t.color = colorsA.get(a.src);
t.obs.grow().set(pixelsB.get(a.dst));
t.frame.add(imageB);
featuresAll.add(t);
featuresB.add(t);
totalAdded++;
}
}
System.out.println(" New added " + totalAdded + " tracksA.size = " + featuresA.size() + " tracksB.size = " + featuresB.size());
}
Usage example of org.ddogleg.struct.GrowQueue_I32 from the BoofCV project (by lessthanoptimal): class ExampleMultiviewSceneReconstruction, method detectImageFeatures.
/**
 * Detect image features in all the images. Save location, description, and color
 */
private void detectImageFeatures(List<BufferedImage> colorImages) {
    System.out.println("Detecting Features in each image. Total " + colorImages.size());
    for (BufferedImage colorImage : colorImages) {
        // progress indicator, one '*' per image
        System.out.print("*");
        FastQueue<BrightFeature> descriptions = new SurfFeatureQueue(64);
        FastQueue<Point2D_F64> locations = new FastQueue<>(Point2D_F64.class, true);
        GrowQueue_I32 rgbValues = new GrowQueue_I32();
        detectFeatures(colorImage, descriptions, locations, rgbValues);
        // store the per-image results in parallel lists
        imageVisualFeatures.add(descriptions);
        imagePixels.add(locations);
        imageColors.add(rgbValues);
    }
    System.out.println();
}
Usage example of org.ddogleg.struct.GrowQueue_I32 from the BoofCV project (by lessthanoptimal): class TestRefinePolygonToContour, method reverseOrder.
@Test
public void reverseOrder() {
    // Build a synthetic rectangular contour and the indexes of its corner vertexes
    RectangleLength2D_I32 shape = new RectangleLength2D_I32(0, 0, 10, 5);
    List<Point2D_I32> boundary = rectToContour(shape);
    GrowQueue_I32 corners = computeContourVertexes(shape);

    // Reverse the vertex ordering to exercise the opposite winding direction
    flip(corners.data, corners.size);

    Polygon2D_F64 refined = new Polygon2D_F64();
    new RefinePolygonToContour().process(boundary, corners, refined);

    assertTrue(checkPolygon(new double[] { 0, 0, 0, 4, 9, 4, 9, 0 }, refined));
}
Usage example of org.ddogleg.struct.GrowQueue_I32 from the BoofCV project (by lessthanoptimal): class TestRefinePolygonToContour, method basic.
@Test
public void basic() {
    // Build a synthetic rectangular contour and the indexes of its corner vertexes
    RectangleLength2D_I32 shape = new RectangleLength2D_I32(0, 0, 10, 5);
    List<Point2D_I32> boundary = rectToContour(shape);
    GrowQueue_I32 corners = computeContourVertexes(shape);

    Polygon2D_F64 refined = new Polygon2D_F64();
    new RefinePolygonToContour().process(boundary, corners, refined);

    // Expect the rectangle's four corners, in the original winding order
    assertTrue(checkPolygon(new double[] { 0, 0, 9, 0, 9, 4, 0, 4 }, refined));
}
Aggregations