Use of org.locationtech.geowave.analytic.mapreduce.clustering.runner.MultiLevelJumpKMeansClusteringJobRunner in project geowave by locationtech.
In class KmeansJumpCommand, method execute:
@Override
public void execute(final OperationParams params) throws Exception {
  // Ensure we have all the required arguments
  if (parameters.size() != 1) {
    throw new ParameterException("Requires arguments: <storename>");
  }
  final String inputStoreName = parameters.get(0);

  // Config file
  final File configFile = getGeoWaveConfigFile(params);
  if (commonOptions.getMapReduceHdfsHostPort() == null) {
    final Properties configProperties = ConfigOptions.loadProperties(configFile);
    final String hdfsFSUrl = ConfigHDFSCommand.getHdfsUrl(configProperties);
    commonOptions.setMapReduceHdfsHostPort(hdfsFSUrl);
  }

  // Attempt to load store.
  inputStoreOptions = CLIUtils.loadStore(inputStoreName, configFile, params.getConsole());

  // Save a reference to the store in the property management.
  final PersistableStore persistedStore = new PersistableStore(inputStoreOptions);
  final PropertyManagement properties = new PropertyManagement();
  properties.store(StoreParameters.StoreParam.INPUT_STORE, persistedStore);

  // Convert properties from the common and k-means jump options
  final PropertyManagementConverter converter = new PropertyManagementConverter(properties);
  converter.readProperties(commonOptions);
  converter.readProperties(kmeansCommonOptions);
  converter.readProperties(kmeansJumpOptions);
  properties.store(Extract.QUERY, commonOptions.buildQuery());

  final MultiLevelJumpKMeansClusteringJobRunner runner = new MultiLevelJumpKMeansClusteringJobRunner();
  final int status = runner.run(properties);
  if (status != 0) {
    throw new RuntimeException("Failed to execute: " + status);
  }
}
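
The command wires its CLI options into a PropertyManagement instance and hands that to the runner. As a rough sketch of driving the runner programmatically, outside the CLI, the same calls seen above could be combined directly; here storeOptions and query are assumed to exist and are not from the GeoWave sources:

  // Minimal sketch, assuming an already-configured DataStorePluginOptions
  // named storeOptions and a query built elsewhere; only calls shown in the
  // snippets on this page are used.
  final PropertyManagement properties = new PropertyManagement();
  properties.store(StoreParameters.StoreParam.INPUT_STORE, new PersistableStore(storeOptions));
  properties.store(Extract.QUERY, query);
  final MultiLevelJumpKMeansClusteringJobRunner runner = new MultiLevelJumpKMeansClusteringJobRunner();
  final int status = runner.run(properties);
  if (status != 0) {
    // Non-zero status mirrors the error handling in KmeansJumpCommand above
    throw new RuntimeException("Failed to execute: " + status);
  }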
Use of org.locationtech.geowave.analytic.mapreduce.clustering.runner.MultiLevelJumpKMeansClusteringJobRunner in project geowave by locationtech.
In class GeoWaveKMeansIT, method runKJumpPlusPlus:
private void runKJumpPlusPlus(final QueryConstraints query) throws Exception {
  final MultiLevelJumpKMeansClusteringJobRunner jobRunner2 = new MultiLevelJumpKMeansClusteringJobRunner();
  final int res2 = jobRunner2.run(
      MapReduceTestUtils.getConfiguration(),
      new PropertyManagement(
          new ParameterEnum[] {
              ExtractParameters.Extract.QUERY,
              ExtractParameters.Extract.MIN_INPUT_SPLIT,
              ExtractParameters.Extract.MAX_INPUT_SPLIT,
              ClusteringParameters.Clustering.ZOOM_LEVELS,
              ExtractParameters.Extract.OUTPUT_DATA_TYPE_ID,
              StoreParam.INPUT_STORE,
              GlobalParameters.Global.BATCH_ID,
              MapReduceParameters.MRConfig.HDFS_BASE_DIR,
              JumpParameters.Jump.RANGE_OF_CENTROIDS,
              JumpParameters.Jump.KPLUSPLUS_MIN,
              ClusteringParameters.Clustering.MAX_ITERATIONS },
          new Object[] {
              QueryBuilder.newBuilder().constraints(query).build(),
              MapReduceTestUtils.MIN_INPUT_SPLITS,
              MapReduceTestUtils.MAX_INPUT_SPLITS,
              2,
              "centroid",
              new PersistableStore(dataStorePluginOptions),
              "bx2",
              TestUtils.TEMP_DIR
                  + File.separator
                  + MapReduceTestEnvironment.HDFS_BASE_DIRECTORY
                  + "/t2",
              new NumericRange(4, 7),
              5,
              2 }));
  Assert.assertEquals(0, res2);

  final DataStore dataStore = dataStorePluginOptions.createDataStore();
  final IndexStore indexStore = dataStorePluginOptions.createIndexStore();
  final PersistentAdapterStore adapterStore = dataStorePluginOptions.createAdapterStore();
  final InternalAdapterStore internalAdapterStore = dataStorePluginOptions.createInternalAdapterStore();

  final int jumpResultCountLevel1 =
      countResults(dataStore, indexStore, adapterStore, internalAdapterStore, "bx2", 1, 1);
  final int jumpResultCountLevel2 =
      countResults(dataStore, indexStore, adapterStore, internalAdapterStore, "bx2", 2, jumpResultCountLevel1);
  Assert.assertTrue(jumpResultCountLevel1 >= 2);
  Assert.assertTrue(jumpResultCountLevel2 >= 2);

  // Limit memory consumption so the test can run on travis-ci
  System.gc();
}
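
The parallel-array PropertyManagement constructor above pairs each ParameterEnum with the value at the same index, which is compact but easy to misalign. As a hedged sketch, the same configuration could presumably be expressed with individual store calls, the style KmeansJumpCommand uses for INPUT_STORE and QUERY; the comment on RANGE_OF_CENTROIDS is an inference from the parameter name, not taken from the GeoWave sources:

  // Sketch only: assumes PropertyManagement.store accepts these keys, as it
  // does for INPUT_STORE and QUERY in KmeansJumpCommand above.
  final PropertyManagement props = new PropertyManagement();
  props.store(ExtractParameters.Extract.QUERY, QueryBuilder.newBuilder().constraints(query).build());
  props.store(ClusteringParameters.Clustering.ZOOM_LEVELS, 2);
  // Candidate range of centroid counts (k) explored by the jump method
  props.store(JumpParameters.Jump.RANGE_OF_CENTROIDS, new NumericRange(4, 7));
  props.store(JumpParameters.Jump.KPLUSPLUS_MIN, 5);
  props.store(ClusteringParameters.Clustering.MAX_ITERATIONS, 2);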