Use of org.apache.ignite.ml.preprocessing.encoding.target.TargetEncoderPreprocessor in project ignite by apache.
The class EncoderTrainer, method fit.
/**
 * {@inheritDoc}
 */
@Override public EncoderPreprocessor<K, V> fit(LearningEnvironmentBuilder envBuilder,
    DatasetBuilder<K, V> datasetBuilder,
    Preprocessor<K, V> basePreprocessor) {
    if (handledIndices.isEmpty() && encoderType != EncoderType.LABEL_ENCODER)
        throw new RuntimeException("Add indices of handled features");

    try (Dataset<EmptyContext, EncoderPartitionData> dataset = datasetBuilder.build(
        envBuilder,
        (env, upstream, upstreamSize) -> new EmptyContext(),
        (env, upstream, upstreamSize, ctx) -> {
            EncoderPartitionData partData = new EncoderPartitionData();

            if (encoderType == EncoderType.LABEL_ENCODER) {
                // Label encoding needs only the label frequencies.
                Map<String, Integer> lbFrequencies = null;

                while (upstream.hasNext()) {
                    UpstreamEntry<K, V> entity = upstream.next();
                    LabeledVector<Double> row = basePreprocessor.apply(entity.getKey(), entity.getValue());

                    lbFrequencies = updateLabelFrequenciesForNextRow(row, lbFrequencies);
                }

                partData.withLabelFrequencies(lbFrequencies);
            }
            else if (encoderType == EncoderType.TARGET_ENCODER) {
                // Target encoding accumulates per-category target statistics.
                TargetCounter[] targetCounter = null;

                while (upstream.hasNext()) {
                    UpstreamEntry<K, V> entity = upstream.next();
                    LabeledVector<Double> row = basePreprocessor.apply(entity.getKey(), entity.getValue());

                    targetCounter = updateTargetCountersForNextRow(row, targetCounter);
                }

                partData.withTargetCounters(targetCounter);
            }
            else {
                // This array will contain non-null values only for the handled indices.
                Map<String, Integer>[] categoryFrequencies = null;

                while (upstream.hasNext()) {
                    UpstreamEntry<K, V> entity = upstream.next();
                    LabeledVector<Double> row = basePreprocessor.apply(entity.getKey(), entity.getValue());

                    categoryFrequencies = updateFeatureFrequenciesForNextRow(row, categoryFrequencies);
                }

                partData.withCategoryFrequencies(categoryFrequencies);
            }

            return partData;
        },
        learningEnvironment(basePreprocessor)
    )) {
        switch (encoderType) {
            case ONE_HOT_ENCODER:
                return new OneHotEncoderPreprocessor<>(calculateEncodingValuesByFrequencies(dataset), basePreprocessor, handledIndices);
            case STRING_ENCODER:
                return new StringEncoderPreprocessor<>(calculateEncodingValuesByFrequencies(dataset), basePreprocessor, handledIndices);
            case LABEL_ENCODER:
                return new LabelEncoderPreprocessor<>(calculateEncodingValuesForLabelsByFrequencies(dataset), basePreprocessor);
            case FREQUENCY_ENCODER:
                return new FrequencyEncoderPreprocessor<>(calculateEncodingFrequencies(dataset), basePreprocessor, handledIndices);
            case TARGET_ENCODER:
                return new TargetEncoderPreprocessor<>(calculateTargetEncodingFrequencies(dataset), basePreprocessor, handledIndices);
            default:
                throw new IllegalStateException("Define the type of the resulting preprocessor.");
        }
    }
    catch (Exception e) {
        throw new RuntimeException(e);
    }
}
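In application code this fit overload is usually reached through one of the PreprocessingTrainer convenience methods rather than called directly. The sketch below shows how a target encoder might be trained against an Ignite cache; it is a minimal illustration only, assuming the documented EncoderTrainer builder calls (withEncoderType, withEncodedFeature) and the fit(Ignite, IgniteCache, Preprocessor) overload. The cache name, feature layout and label position are made up for the example.

// Minimal sketch, not taken from the snippet above. Assumes a cache of labeled vectors
// whose first three coordinates are categorical features and whose last coordinate
// holds the target value; names and layout are illustrative.
IgniteCache<Integer, Vector> dataCache = ignite.cache("ENCODER_DATA");

Vectorizer<Integer, Vector, Integer, Double> vectorizer =
    new DummyVectorizer<Integer>(0, 1, 2).labeled(Vectorizer.LabelCoordinate.LAST);

Preprocessor<Integer, Vector> targetEncoder = new EncoderTrainer<Integer, Vector>()
    .withEncoderType(EncoderType.TARGET_ENCODER)
    .withEncodedFeature(0)
    .withEncodedFeature(1)
    .withEncodedFeature(2)
    .fit(ignite, dataCache, vectorizer);

With EncoderType.TARGET_ENCODER selected, the call goes through the TARGET_ENCODER branch above, accumulates TargetCounter statistics per partition and returns a TargetEncoderPreprocessor that can then be passed as the preprocessor to a downstream trainer.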
Use of org.apache.ignite.ml.preprocessing.encoding.target.TargetEncoderPreprocessor in project ignite by apache.
The class TargetEncoderPreprocessorTest, method testApply.
/**
 * Tests {@code apply()} method.
 */
@Test
public void testApply() {
    Vector[] data = new Vector[] {
        new DenseVector(new Serializable[] {"1", "Moscow", "A"}),
        new DenseVector(new Serializable[] {"2", "Moscow", "B"}),
        new DenseVector(new Serializable[] {"3", "Moscow", "B"})
    };

    Vectorizer<Integer, Vector, Integer, Double> vectorizer = new DummyVectorizer<>(0, 1, 2);

    TargetEncoderPreprocessor<Integer, Vector> preprocessor = new TargetEncoderPreprocessor<>(
        new TargetEncodingMeta[] {
            // Feature 0: global mean 0.5, category means for "1" and "2".
            new TargetEncodingMeta()
                .withGlobalMean(0.5)
                .withCategoryMean(new HashMap<String, Double>() {{
                    put("1", 1.0);
                    put("2", 0.0);
                }}),
            // Feature 1: global mean 0.1, no category means collected.
            new TargetEncodingMeta()
                .withGlobalMean(0.1)
                .withCategoryMean(new HashMap<String, Double>()),
            // Feature 2: global mean 0.1, category means for "A" and "B".
            new TargetEncodingMeta()
                .withGlobalMean(0.1)
                .withCategoryMean(new HashMap<String, Double>() {{
                    put("A", 1.0);
                    put("B", 2.0);
                }})
        },
        vectorizer,
        new HashSet<Integer>() {{
            add(0);
            add(1);
            add(2);
        }});

    double[][] postProcessedData = new double[][] {
        {
            1.0, // "1" is in the dictionary => category mean 1.0.
            0.1, // "Moscow" is not in the dictionary => global mean 0.1.
            1.0  // "A" is in the dictionary => category mean 1.0.
        },
        {
            0.0, // "2" is in the dictionary => category mean 0.0.
            0.1, // "Moscow" is not in the dictionary => global mean 0.1.
            2.0  // "B" is in the dictionary => category mean 2.0.
        },
        {
            0.5, // "3" is not in the dictionary => global mean 0.5.
            0.1, // "Moscow" is not in the dictionary => global mean 0.1.
            2.0  // "B" is in the dictionary => category mean 2.0.
        }
    };

    for (int i = 0; i < data.length; i++)
        assertArrayEquals(postProcessedData[i], preprocessor.apply(i, data[i]).features().asArray(), 1e-8);
}
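The category and global means wired into TargetEncodingMeta above are plain averages of the target value. The toy sketch below, which is independent of the test fixture and assumes only java.util imports, shows how such means could be computed from raw (category, target) pairs; it mirrors the idea behind calculateTargetEncodingFrequencies rather than reproducing the trainer's actual implementation.

// Toy data: three rows of (category, target). Values are arbitrary.
Map<String, List<Double>> byCategory = new HashMap<>();
byCategory.computeIfAbsent("A", k -> new ArrayList<>()).add(1.0);
byCategory.computeIfAbsent("B", k -> new ArrayList<>()).add(3.0);
byCategory.computeIfAbsent("B", k -> new ArrayList<>()).add(1.0);

// Per-category mean: the average target over all rows falling into that category.
Map<String, Double> categoryMean = new HashMap<>();
byCategory.forEach((cat, targets) ->
    categoryMean.put(cat, targets.stream().mapToDouble(Double::doubleValue).average().orElse(0.0)));

// Global mean: the average target over all rows; the fallback for unseen categories.
double globalMean = byCategory.values().stream()
    .flatMap(List::stream)
    .mapToDouble(Double::doubleValue)
    .average()
    .orElse(0.0);

// Here categoryMean = {A=1.0, B=2.0} and globalMean = 5.0 / 3.

Rows whose category is missing from the dictionary, such as "3" and "Moscow" in the test, fall back to the feature's global mean, which is exactly what the expected postProcessedData array encodes.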