Usage of boofcv.struct.feature.TupleDesc_F64 in the BoofCV project (by lessthanoptimal): class ExampleColorHistogramLookup, method independentHueSat.
/**
 * Computes two independent 1D histograms from hue and saturation. Less affected by sparsity, but can produce
 * worse results since the basic assumption that hue and saturation are decoupled is most of the time false.
 *
 * @param images Image files to describe. Each produces one descriptor.
 * @return One normalized histogram (hue bins followed by saturation bins) per input image.
 */
public static List<double[]> independentHueSat(List<File> images) {
List<double[]> points = new ArrayList<>();
// The number of bins is an important parameter. Try adjusting it
TupleDesc_F64 histogramHue = new TupleDesc_F64(30);
// Renamed from "histogramValue": this bins band 1 of HSV, which is saturation, not value
TupleDesc_F64 histogramSaturation = new TupleDesc_F64(30);
List<TupleDesc_F64> histogramList = new ArrayList<>();
histogramList.add(histogramHue);
histogramList.add(histogramSaturation);
// Work images are reshaped per file, so a single pair is reused for the whole set
Planar<GrayF32> rgb = new Planar<>(GrayF32.class, 1, 1, 3);
Planar<GrayF32> hsv = new Planar<>(GrayF32.class, 1, 1, 3);
for (File f : images) {
BufferedImage buffered = UtilImageIO.loadImageNotNull(f.getPath());
rgb.reshape(buffered.getWidth(), buffered.getHeight());
hsv.reshape(buffered.getWidth(), buffered.getHeight());
ConvertBufferedImage.convertFrom(buffered, rgb, true);
ColorHsv.rgbToHsv(rgb, hsv);
// Band 0 is hue in radians [0, 2*pi); band 1 is saturation in [0, 1]
GHistogramFeatureOps.histogram(hsv.getBand(0), 0, 2 * Math.PI, histogramHue);
GHistogramFeatureOps.histogram(hsv.getBand(1), 0, 1, histogramSaturation);
// need to combine them into a single descriptor for processing later on
TupleDesc_F64 imageHist = UtilFeature.combine(histogramList, null);
// normalize so that image size doesn't matter
UtilFeature.normalizeL2(imageHist);
points.add(imageHist.data);
}
return points;
}
Usage of boofcv.struct.feature.TupleDesc_F64 in the BoofCV project (by lessthanoptimal): class ExampleComputeFundamentalMatrix, method computeMatches.
/**
 * Use the associate point feature example to create a list of {@link AssociatedPair} for use in computing the
 * fundamental matrix.
 *
 * @param left Left image of the stereo pair
 * @param right Right image of the stereo pair
 * @return Matched point pairs between the two views
 */
public static List<AssociatedPair> computeMatches(BufferedImage left, BufferedImage right) {
// Detector/descriptor: stable SURF tuned via an explicit Fast-Hessian configuration
ConfigFastHessian configDetector = new ConfigFastHessian(0, 2, 400, 1, 9, 4, 4);
DetectDescribePoint<GrayF32, TupleDesc_F64> detDesc = FactoryDetectDescribe.surfStable(configDetector, null, null, GrayF32.class);
// DetectDescribePoint detDesc = FactoryDetectDescribe.sift(null,new ConfigSiftDetector(2,0,200,5),null,null);
// Greedy association using squared Euclidean distance between descriptors
ScoreAssociation<TupleDesc_F64> scorer = FactoryAssociation.scoreEuclidean(TupleDesc_F64.class, true);
AssociateDescription<TupleDesc_F64> associate = FactoryAssociation.greedy(new ConfigAssociateGreedy(true, 0.1), scorer);
var findMatches = new ExampleAssociatePoints<>(detDesc, associate, GrayF32.class);
findMatches.associate(left, right);
// Convert every associated index into an explicit pair of image coordinates
List<AssociatedPair> matches = new ArrayList<>();
FastAccess<AssociatedIndex> matchIndexes = associate.getMatches();
for (int matchIdx = 0; matchIdx < matchIndexes.size; matchIdx++) {
AssociatedIndex assoc = matchIndexes.get(matchIdx);
matches.add(new AssociatedPair(findMatches.pointsA.get(assoc.src), findMatches.pointsB.get(assoc.dst)));
}
return matches;
}
Usage of boofcv.struct.feature.TupleDesc_F64 in the BoofCV project (by lessthanoptimal): class ExampleClassifySceneKnn, method main.
/**
 * Trains and evaluates a k-NN scene classifier over dense SURF descriptors, downloading the
 * example dataset on first run, then displays the resulting confusion matrix.
 */
public static void main(String[] args) {
var surfFast = new ConfigDenseSurfFast(new DenseSampling(8, 8));
// ConfigDenseSurfStable surfStable = new ConfigDenseSurfStable(new DenseSampling(8,8));
// ConfigDenseSift sift = new ConfigDenseSift(new DenseSampling(6,6));
// ConfigDenseHoG hog = new ConfigDenseHoG();
DescribeImageDense<GrayU8, TupleDesc_F64> desc = FactoryDescribeImageDense.surfFast(surfFast, GrayU8.class);
// FactoryDescribeImageDense.surfStable(surfStable, GrayU8.class);
// FactoryDescribeImageDense.sift(sift, GrayU8.class);
// FactoryDescribeImageDense.hog(hog, ImageType.single(GrayU8.class));
var configKMeans = new ConfigKMeans();
configKMeans.maxIterations = MAX_KNN_ITERATIONS;
configKMeans.reseedAfterIterations = 20;
// Descriptor length is needed twice below; compute it once
int pointDof = desc.createDescription().size();
// The _MT tells it to use the threaded version. This can run MUCH faster.
ComputeClusters<double[]> clusterer = FactoryClustering.kMeans_MT(configKMeans, pointDof, 200, double[].class);
clusterer.setVerbose(true);
NearestNeighbor<HistogramScene> nn = FactoryNearestNeighbor.exhaustive(new KdTreeHistogramScene_F64(pointDof));
ExampleClassifySceneKnn example = new ExampleClassifySceneKnn(desc, clusterer, nn);
var trainingDir = new File(UtilIO.pathExample("learning/scene/train"));
var testingDir = new File(UtilIO.pathExample("learning/scene/test"));
// Fetch the dataset on first run; otherwise remind the user how to recover from corruption
if (!trainingDir.exists() || !testingDir.exists()) {
String addressSrc = "http://boofcv.org/notwiki/largefiles/bow_data_v001.zip";
File dst = new File(trainingDir.getParentFile(), "bow_data_v001.zip");
try {
DeepBoofDataBaseOps.download(addressSrc, dst);
DeepBoofDataBaseOps.decompressZip(dst, dst.getParentFile(), true);
System.out.println("Download complete!");
} catch (IOException e) {
throw new UncheckedIOException(e);
}
} else {
System.out.println("Delete and download again if there are file not found errors");
System.out.println("   " + trainingDir);
System.out.println("   " + testingDir);
}
example.loadSets(trainingDir, null, testingDir);
// train the classifier
example.learnAndSave();
// now load it for evaluation purposes from the files
example.loadAndCreateClassifier();
// test the classifier on the test set
Confusion confusion = example.evaluateTest();
confusion.getMatrix().print();
System.out.println("Accuracy = " + confusion.computeAccuracy());
// Show confusion matrix
// Not the best coloration scheme... perfect = red diagonal and blue elsewhere.
ShowImages.showWindow(new ConfusionMatrixPanel(confusion.getMatrix(), example.getScenes(), 400, true), "Confusion Matrix", true);
// For SIFT descriptor the accuracy is 54.0%
// For "fast" SURF descriptor the accuracy is 52.2%
// For "stable" SURF descriptor the accuracy is 49.4%
// For HOG 53.3%
// NOTE(review): the discussion below claims "stable" beats "fast", but the accuracies listed
// above show the opposite (52.2% vs 49.4%). Either the numbers or the prose is stale — verify.
// SURF results are interesting. "Stable" is significantly better than "fast"!
// One explanation is that the descriptor for "fast" samples a smaller region than "stable", by a
// couple of pixels at scale of 1. Thus there is less overlap between the features.
// Reducing the size of "stable" to 0.95 does slightly improve performance to 50.5%, can't scale it down
// much more without performance going down
}
Usage of boofcv.struct.feature.TupleDesc_F64 in the BoofCV project (by lessthanoptimal): class TestDetectDescribeSurfPlanar_MT, method compare_Single_to_MT.
@Test
void compare_Single_to_MT() {
// Same random planar input is fed to both the single and multi-threaded detectors
Planar<GrayF32> input = new Planar<>(GrayF32.class, width, height, 3);
GImageMiscOps.addUniform(input, rand, 0, 200);
DetectDescribeSurfPlanar<GrayF32> singleThread;
DetectDescribeSurfPlanar_MT<GrayF32> multiThread;
{
DescribePointSurf<GrayF32> describe = new DescribePointSurf<>(GrayF32.class);
DescribePointSurfPlanar<GrayF32> describePlanar = new DescribePointSurfPlanar<>(describe, 3);
FastHessianFeatureDetector<GrayF32> detector = FactoryInterestPointAlgs.fastHessian(null);
OrientationIntegral<GrayF32> orientation = FactoryOrientationAlgs.sliding_ii(null, GrayF32.class);
singleThread = new DetectDescribeSurfPlanar<>(detector, orientation, describePlanar);
}
{
DescribePointSurf<GrayF32> describe = new DescribePointSurf<>(GrayF32.class);
DescribePointSurfPlanar<GrayF32> describePlanar = new DescribePointSurfPlanar<>(describe, 3);
FastHessianFeatureDetector<GrayF32> detector = FactoryInterestPointAlgs.fastHessian(null);
OrientationIntegral<GrayF32> orientation = FactoryOrientationAlgs.sliding_ii(null, GrayF32.class);
multiThread = new DetectDescribeSurfPlanar_MT<>(detector, orientation, describePlanar);
}
GrayF32 gray = ConvertImage.average(input, null);
singleThread.detect(gray, input);
multiThread.detect(gray, input);
assertEquals(singleThread.getNumberOfFeatures(), multiThread.getNumberOfFeatures());
int total = singleThread.getNumberOfFeatures();
for (int i = 0; i < total; i++) {
Point2D_F64 expected = singleThread.getLocation(i);
// order isn't guaranteed. Do an exhaustive search
boolean matched = false;
for (int j = 0; j < total; j++) {
Point2D_F64 found = multiThread.getLocation(j);
// All attributes must agree exactly before the descriptors are compared
boolean sameFeature = expected.x == found.x && expected.y == found.y
&& singleThread.getRadius(i) == multiThread.getRadius(j)
&& singleThread.getOrientation(i) == multiThread.getOrientation(j)
&& singleThread.isWhite(i) == multiThread.isWhite(j);
if (!sameFeature) {
continue;
}
TupleDesc_F64 descSingle = singleThread.getDescription(i);
TupleDesc_F64 descMulti = multiThread.getDescription(j);
assertEquals(0, DescriptorDistance.sad(descSingle, descMulti));
matched = true;
break;
}
assertTrue(matched, "No match " + i);
}
}
Usage of boofcv.struct.feature.TupleDesc_F64 in the BoofCV project (by lessthanoptimal): class TestCompleteSift_MT, method compareToSingleThread.
@Test
void compareToSingleThread() {
GrayF32 image = new GrayF32(300, 290);
GImageMiscOps.fillUniform(image, rand, 0, 200);
// Build one detector with concurrency off and one with it on, from the same config
BoofConcurrency.USE_CONCURRENT = false;
CompleteSift serial = FactoryDetectDescribeAlgs.sift(config);
BoofConcurrency.USE_CONCURRENT = true;
CompleteSift concurrent = FactoryDetectDescribeAlgs.sift(config);
serial.process(image);
concurrent.process(image);
// Both should produce standard 128-element SIFT descriptors
assertEquals(128, serial.getDescriptorLength());
assertEquals(128, concurrent.getDescriptorLength());
assertEquals(serial.getLocations().size, concurrent.getLocations().size);
int total = serial.getLocations().size;
// Features are expected in the same order; compare element by element
for (int index = 0; index < total; index++) {
ScalePoint expected = serial.getLocations().get(index);
ScalePoint found = concurrent.getLocations().get(index);
assertEquals(expected.intensity, found.intensity);
assertEquals(expected.scale, found.scale);
assertEquals(0.0, expected.pixel.distance(found.pixel));
assertEquals(serial.getOrientations().get(index), concurrent.getOrientations().get(index));
TupleDesc_F64 descSerial = serial.getDescriptions().get(index);
TupleDesc_F64 descConcurrent = concurrent.getDescriptions().get(index);
assertEquals(0.0, DescriptorDistance.euclidean(descSerial, descConcurrent));
}
}
Aggregations