Use of org.apache.ignite.ml.dataset.feature.extractor.Vectorizer in project ignite by apache.
The class TrainingWithCustomPreprocessorsExample, method main.
/**
* Run example.
*
* @param args Command line arguments.
* @throws Exception Exception.
*/
public static void main(String[] args) throws Exception {
    try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
        IgniteCache<Integer, Vector> trainingSet = null;
        try {
            trainingSet = new SandboxMLCache(ignite).fillCacheWith(MLSandboxDatasets.BOSTON_HOUSE_PRICES);

            Vectorizer<Integer, Vector, Integer, Double> basicVectorizer = new DummyVectorizer<Integer>()
                .labeled(Vectorizer.LabelCoordinate.FIRST);

            Preprocessor<Integer, Vector> imputingPreprocessor = new ImputerTrainer<Integer, Vector>()
                .fit(ignite, trainingSet, basicVectorizer);

            // In-place definition of a custom preprocessor by a lambda expression: replaces the fifth
            // feature with its logarithm (or with -1 if the value is not positive).
            Preprocessor<Integer, Vector> customPreprocessor = (k, v) -> {
                LabeledVector res = imputingPreprocessor.apply(k, v);
                double fifthFeature = res.features().get(5);
                Vector updatedVector = res.features().set(5, fifthFeature > 0 ? Math.log(fifthFeature) : -1);
                return updatedVector.labeled(res.label());
            };

            // Vectorizer9000 is a custom Vectorizer defined in this example that delegates to the given preprocessor.
            Vectorizer9000 customVectorizer = new Vectorizer9000(customPreprocessor);

            PipelineMdl<Integer, Vector> mdl = new Pipeline<Integer, Vector, Integer, Double>()
                .addVectorizer(customVectorizer)
                .addPreprocessingTrainer(new MinMaxScalerTrainer<Integer, Vector>())
                .addPreprocessingTrainer(new NormalizationTrainer<Integer, Vector>().withP(1))
                .addPreprocessingTrainer(getCustomTrainer())
                .addTrainer(new DecisionTreeClassificationTrainer(5, 0))
                .fit(ignite, trainingSet);

            System.out.println(">>> Perform scoring.");
            double score = Evaluator.evaluate(trainingSet, mdl, mdl.getPreprocessor(), MetricName.R2);
            System.out.println(">>> R^2 score: " + score);
        }
        finally {
            if (trainingSet != null)
                trainingSet.destroy();
        }
    }
    finally {
        System.out.flush();
    }
}
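As a side note, here is a minimal sketch (not part of the example above; the row values are invented) of how a labeled DummyVectorizer maps a single cache entry to a LabeledVector, using VectorUtils from org.apache.ignite.ml.math.primitives.vector:

Vectorizer<Integer, Vector, Integer, Double> v = new DummyVectorizer<Integer>()
    .labeled(Vectorizer.LabelCoordinate.FIRST);

// The FIRST coordinate becomes the label; the remaining coordinates become the features.
Vector row = VectorUtils.of(24.0, 0.00632, 2.31);
LabeledVector<Double> labeled = v.apply(1, row);

System.out.println(labeled.label());    // Prints the label: 24.0.
System.out.println(labeled.features()); // Prints the remaining two coordinates as the feature vector.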
Use of org.apache.ignite.ml.dataset.feature.extractor.Vectorizer in project ignite by apache.
The class AlgorithmSpecificDatasetExample, method main.
/**
 * Run example.
 *
 * @param args Command line arguments.
 * @throws Exception Exception.
 */
public static void main(String[] args) throws Exception {
    try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
        System.out.println(">>> Algorithm Specific Dataset example started.");
        IgniteCache<Integer, Vector> persons = null;
        try {
            persons = createCache(ignite);

            Vectorizer<Integer, Vector, Integer, Double> vectorizer = new DummyVectorizer<>(1);

            IgniteFunction<LabeledVector<Double>, LabeledVector<double[]>> func =
                lv -> new LabeledVector<>(lv.features(), new double[] {lv.label()});

            // NOTE: This class is part of the Developer API, and all lambdas should be loaded on the server manually.
            Preprocessor<Integer, Vector> preprocessor = new PatchedPreprocessor<>(func, vectorizer);

            // Creates an algorithm-specific dataset to perform linear regression. Here we define the way features and
            // labels are extracted, and how partition data and context are created.
            SimpleLabeledDatasetDataBuilder<Integer, Vector, AlgorithmSpecificPartitionContext> builder =
                new SimpleLabeledDatasetDataBuilder<>(preprocessor);

            IgniteBiFunction<SimpleLabeledDatasetData, AlgorithmSpecificPartitionContext, SimpleLabeledDatasetData> builderFun = (data, ctx) -> {
                double[] features = data.getFeatures();
                int rows = data.getRows();

                // Makes a copy of the features to supplement it by a column with values equal to 1.0
                // (the intercept column; the features array is stored in column-major order, so the
                // first `rows` entries left untouched by the copy form the new first column).
                double[] a = new double[features.length + rows];
                Arrays.fill(a, 1.0);
                System.arraycopy(features, 0, a, rows, features.length);

                return new SimpleLabeledDatasetData(a, data.getLabels(), rows);
            };

            try (AlgorithmSpecificDataset dataset = DatasetFactory.create(
                ignite,
                persons,
                (env, upstream, upstreamSize) -> new AlgorithmSpecificPartitionContext(),
                builder.andThen(builderFun)
            ).wrap(AlgorithmSpecificDataset::new)) {
                // Trains a linear regression model using gradient descent.
                double[] linearRegressionMdl = new double[2];

                for (int i = 0; i < 1000; i++) {
                    double[] gradient = dataset.gradient(linearRegressionMdl);

                    if (BLAS.getInstance().dnrm2(gradient.length, gradient, 1) < 1e-4)
                        break;

                    for (int j = 0; j < gradient.length; j++)
                        linearRegressionMdl[j] -= 0.1 / persons.size() * gradient[j];
                }

                System.out.println("Linear Regression Model: " + Arrays.toString(linearRegressionMdl));
            }

            System.out.println(">>> Algorithm Specific Dataset example completed.");
        }
        finally {
            if (persons != null)
                persons.destroy();
        }
    }
    finally {
        System.out.flush();
    }
}
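The AlgorithmSpecificDataset wrapper and its gradient method are defined elsewhere in the full example and are not shown here. As a hedged illustration only (an assumption about the semantics, not the example's actual implementation), a least-squares gradient over the column-major features array built above could look like the sketch below; the example's update step already divides by the number of rows, so no averaging is done inside:

// Hypothetical helper: grad[j] = sum_i 2 * (x_i . w - y_i) * x[i][j], with features stored
// column-major, i.e. column j occupies features[j * rows .. j * rows + rows - 1].
static double[] leastSquaresGradient(double[] features, double[] labels, int rows, double[] w) {
    int cols = features.length / rows;
    double[] grad = new double[cols];

    for (int i = 0; i < rows; i++) {
        double err = -labels[i];
        for (int j = 0; j < cols; j++)
            err += features[j * rows + i] * w[j]; // Accumulates x_i . w - y_i.

        for (int j = 0; j < cols; j++)
            grad[j] += 2.0 * err * features[j * rows + i];
    }

    return grad;
}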
Use of org.apache.ignite.ml.dataset.feature.extractor.Vectorizer in project ignite by apache.
The class MLDeployingTest, method createVectorizer.
/**
 * Creates a vectorizer whose class is loaded from an external class loader.
 */
private Vectorizer<Integer, Vector, Integer, Double> createVectorizer() throws ClassNotFoundException,
    NoSuchMethodException, InstantiationException, IllegalAccessException,
    java.lang.reflect.InvocationTargetException {
    ClassLoader ldr = getExternalClassLoader();
    Class<?> clazz = ldr.loadClass(EXT_VECTORIZER);

    // Instantiate the externally deployed vectorizer reflectively via its no-arg constructor.
    Constructor<?> ctor = clazz.getConstructor();
    Vectorizer<Integer, Vector, Integer, Double> vectorizer =
        (Vectorizer<Integer, Vector, Integer, Double>)ctor.newInstance();

    // Treat the last coordinate of each vector as the label.
    vectorizer = vectorizer.labeled(Vectorizer.LabelCoordinate.LAST);

    return vectorizer;
}
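Once loaded, the externally deployed vectorizer is used like any other. A hedged usage sketch (ignite and cache are assumed to be in scope, and KMeansTrainer is just one possible consumer):

Vectorizer<Integer, Vector, Integer, Double> vectorizer = createVectorizer();

// Any trainer that accepts a preprocessor works here; KMeans is used purely for illustration.
KMeansTrainer trainer = new KMeansTrainer().withAmountOfClusters(2);
KMeansModel mdl = trainer.fit(ignite, cache, vectorizer);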
Use of org.apache.ignite.ml.dataset.feature.extractor.Vectorizer in project ignite by apache.
The class CustomersClusterizationExample, method computeMeanEntropy.
/**
* Computes mean entropy in clusters.
*
* @param cache Dataset cache.
* @param filter Test dataset filter.
* @param vectorizer Upstream vectorizer.
* @param mdl KMeans model.
* @return Mean entropy over the clusters.
*/
private static double computeMeanEntropy(IgniteCache<Integer, Vector> cache,
    IgniteBiPredicate<Integer, Vector> filter,
    Vectorizer<Integer, Vector, Integer, Double> vectorizer,
    KMeansModel mdl) {
    // Per-cluster counters of how many vectors carry each label.
    Map<Integer, Map<Integer, AtomicInteger>> clusterUniqueLbCounts = new HashMap<>();

    try (QueryCursor<Cache.Entry<Integer, Vector>> cursor = cache.query(new ScanQuery<>(filter))) {
        for (Cache.Entry<Integer, Vector> ent : cursor) {
            LabeledVector<Double> vec = vectorizer.apply(ent.getKey(), ent.getValue());

            int cluster = mdl.predict(vec.features());
            int lb = vec.label().intValue();

            clusterUniqueLbCounts
                .computeIfAbsent(cluster, c -> new HashMap<>())
                .computeIfAbsent(lb, l -> new AtomicInteger())
                .incrementAndGet();
        }
    }

    double sumOfClusterEntropies = 0.0;

    for (Integer cluster : clusterUniqueLbCounts.keySet()) {
        Map<Integer, AtomicInteger> lbCounters = clusterUniqueLbCounts.get(cluster);

        int sizeOfCluster = lbCounters.values().stream().mapToInt(AtomicInteger::get).sum();

        double entropyInCluster = lbCounters.values().stream()
            .mapToDouble(AtomicInteger::get)
            .map(lblsCount -> lblsCount / sizeOfCluster)
            .map(lblProb -> -lblProb * Math.log(lblProb))
            .sum();

        sumOfClusterEntropies += entropyInCluster;
    }

    return sumOfClusterEntropies / clusterUniqueLbCounts.size();
}
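For reference, the per-cluster entropy computed above is the standard H = -Σ p · ln(p) over the label shares within a cluster. A standalone sketch of just that formula (the helper name is invented):

static double entropy(int[] labelCounts) {
    int size = Arrays.stream(labelCounts).sum();

    return Arrays.stream(labelCounts)
        .mapToDouble(cnt -> (double)cnt / size)
        .filter(p -> p > 0) // Guards against 0 * log(0) producing NaN.
        .map(p -> -p * Math.log(p))
        .sum();
}

// Example: a cluster holding labels {A: 3, B: 1} gives
// entropy(new int[] {3, 1}) = -(0.75 * ln(0.75) + 0.25 * ln(0.25)) ≈ 0.562.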