Use of org.knime.base.node.mine.treeensemble2.data.memberships.DataMemberships in project knime-core by knime.
Class TreeLearnerRegression, method learnSingleTree.
/**
 * {@inheritDoc}
 */
@Override
public TreeModelRegression learnSingleTree(final ExecutionMonitor exec, final RandomData rd)
    throws CanceledExecutionException {
    final TreeTargetNumericColumnData targetColumn = getTargetData();
    final TreeData data = getData();
    final RowSample rowSampling = getRowSampling();
    final TreeEnsembleLearnerConfiguration config = getConfig();
    final IDataIndexManager indexManager = getIndexManager();
    // the root memberships cover all rows selected by the row sample
    DataMemberships rootDataMemberships = new RootDataMemberships(rowSampling, data, indexManager);
    RegressionPriors targetPriors = targetColumn.getPriors(rootDataMemberships, config);
    BitSet forbiddenColumnSet = new BitSet(data.getNrAttributes());
    boolean isGradientBoosting = config instanceof GradientBoostingLearnerConfiguration;
    if (isGradientBoosting) {
        // gradient boosting needs access to the leaf nodes of the finished tree
        m_leafs = new ArrayList<TreeNodeRegression>();
    }
    final TreeNodeSignature rootSignature = TreeNodeSignature.ROOT_SIGNATURE;
    final ColumnSample rootColumnSample = getColSamplingStrategy().getColumnSampleForTreeNode(rootSignature);
    TreeNodeRegression rootNode = buildTreeNode(exec, 0, rootDataMemberships, rootColumnSample,
        getSignatureFactory().getRootSignature(), targetPriors, forbiddenColumnSet);
    assert forbiddenColumnSet.cardinality() == 0;
    rootNode.setTreeNodeCondition(TreeNodeTrueCondition.INSTANCE);
    if (isGradientBoosting) {
        return new TreeModelRegression(rootNode, m_leafs);
    }
    return new TreeModelRegression(rootNode);
}
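As a minimal, stand-alone illustration of the root set-up above (a sketch, not knime-core code): the same RootDataMemberships and priors can also be built from an explicit unit-weight vector, which is the constructor variant used by the tests further down this page. Imports from the treeensemble2 packages and the surrounding class are assumed; the method name and the explicit nrRows parameter are illustrative only.

    // Sketch only: mirrors the root set-up of learnSingleTree, but with an explicit
    // unit-weight vector instead of a RowSample (the double[] constructor of
    // RootDataMemberships that the tests below rely on).
    static RegressionPriors computeRootPriors(final TreeData data, final TreeTargetNumericColumnData target,
        final TreeEnsembleLearnerConfiguration config, final int nrRows) {
        final double[] rowWeights = new double[nrRows];
        Arrays.fill(rowWeights, 1.0); // every row fully participates at the root
        final IDataIndexManager indexManager = new DefaultDataIndexManager(data);
        final DataMemberships root = new RootDataMemberships(rowWeights, data, indexManager);
        // the priors aggregate target statistics (record count, sum of squared deviations, ...)
        // over all rows covered by the memberships
        return target.getPriors(root, config);
    }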
Use of org.knime.base.node.mine.treeensemble2.data.memberships.DataMemberships in project knime-core by knime.
Class TreeLearnerRegression, method findBestSplitsRegression.
private SplitCandidate[] findBestSplitsRegression(final int currentDepth, final DataMemberships dataMemberships,
    final ColumnSample columnSample, final RegressionPriors targetPriors, final BitSet forbiddenColumnSet) {
    final TreeData data = getData();
    final RandomData rd = getRandomData();
    final TreeEnsembleLearnerConfiguration config = getConfig();
    final int maxLevels = config.getMaxLevels();
    // stop splitting if the maximum tree depth is reached
    if (maxLevels != TreeEnsembleLearnerConfiguration.MAX_LEVEL_INFINITE && currentDepth >= maxLevels) {
        return null;
    }
    final int minNodeSize = config.getMinNodeSize();
    // stop splitting if the node contains fewer records than the configured minimum
    if (minNodeSize != TreeEnsembleLearnerConfiguration.MIN_NODE_SIZE_UNDEFINED) {
        if (targetPriors.getNrRecords() < minNodeSize) {
            return null;
        }
    }
    final double priorSquaredDeviation = targetPriors.getSumSquaredDeviation();
    // stop splitting if the target is (numerically) constant in this node
    if (priorSquaredDeviation < TreeColumnData.EPSILON) {
        return null;
    }
    final TreeTargetNumericColumnData targetColumn = getTargetData();
    ArrayList<SplitCandidate> splitCandidates = null;
    if (currentDepth == 0 && config.getHardCodedRootColumn() != null) {
        // the root split is forced onto the configured column
        final TreeAttributeColumnData rootColumn = data.getColumn(config.getHardCodedRootColumn());
        return new SplitCandidate[] { rootColumn.calcBestSplitRegression(dataMemberships, targetPriors, targetColumn, rd) };
    } else {
        splitCandidates = new ArrayList<SplitCandidate>(columnSample.getNumCols());
        for (TreeAttributeColumnData col : columnSample) {
            if (forbiddenColumnSet.get(col.getMetaData().getAttributeIndex())) {
                continue;
            }
            SplitCandidate currentColSplit = col.calcBestSplitRegression(dataMemberships, targetPriors, targetColumn, rd);
            if (currentColSplit != null) {
                splitCandidates.add(currentColSplit);
            }
        }
    }
    // order candidates by descending gain, best split first
    Comparator<SplitCandidate> comp = new Comparator<SplitCandidate>() {

        @Override
        public int compare(final SplitCandidate arg0, final SplitCandidate arg1) {
            int compareDouble = -Double.compare(arg0.getGainValue(), arg1.getGainValue());
            return compareDouble;
        }
    };
    if (splitCandidates.isEmpty()) {
        return null;
    }
    splitCandidates.sort(comp);
    return splitCandidates.toArray(new SplitCandidate[splitCandidates.size()]);
}
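A side note on the comparator: it only orders the candidates by descending gain, so the returned array always has the best split at index 0. Since the code already relies on Java 8's List.sort, the same ordering could be written more compactly (behaviourally equivalent sketch, not the knime-core wording):

    // equivalent to the anonymous Comparator above: highest gain first
    splitCandidates.sort(Comparator.comparingDouble(SplitCandidate::getGainValue).reversed());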
Use of org.knime.base.node.mine.treeensemble2.data.memberships.DataMemberships in project knime-core by knime.
Class TreeNominalColumnDataTest, method testCalcBestSplitClassificationMultiWay.
/**
 * Tests the method
 * {@link TreeNominalColumnData#calcBestSplitClassification(DataMemberships, ClassificationPriors, TreeTargetNominalColumnData, RandomData)}
 * using multiway splits.
 *
 * @throws Exception
 */
@Test
public void testCalcBestSplitClassificationMultiWay() throws Exception {
    TreeEnsembleLearnerConfiguration config = createConfig(false);
    config.setUseBinaryNominalSplits(false);
    Pair<TreeNominalColumnData, TreeTargetNominalColumnData> tennisData = tennisData(config);
    TreeNominalColumnData columnData = tennisData.getFirst();
    TreeTargetNominalColumnData targetData = tennisData.getSecond();
    TreeData treeData = createTreeData(tennisData);
    assertEquals(SplitCriterion.Gini, config.getSplitCriterion());
    double[] rowWeights = new double[SMALL_COLUMN_DATA.length];
    Arrays.fill(rowWeights, 1.0);
    IDataIndexManager indexManager = new DefaultDataIndexManager(treeData);
    DataMemberships dataMemberships = new RootDataMemberships(rowWeights, treeData, indexManager);
    ClassificationPriors priors = targetData.getDistribution(rowWeights, config);
    SplitCandidate splitCandidate = columnData.calcBestSplitClassification(dataMemberships, priors, targetData, null);
    assertNotNull(splitCandidate);
    assertThat(splitCandidate, instanceOf(NominalMultiwaySplitCandidate.class));
    assertFalse(splitCandidate.canColumnBeSplitFurther());
    // expected gain computed manually via LibreOffice Calc
    assertEquals(0.0744897959, splitCandidate.getGainValue(), 0.00001);
    NominalMultiwaySplitCandidate multiWaySplitCandidate = (NominalMultiwaySplitCandidate) splitCandidate;
    TreeNodeNominalCondition[] childConditions = multiWaySplitCandidate.getChildConditions();
    assertEquals(3, childConditions.length);
    assertEquals("S", childConditions[0].getValue());
    assertEquals("O", childConditions[1].getValue());
    assertEquals("R", childConditions[2].getValue());
}
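For readers wondering what the asserted gain of 0.0744897959 measures: with the Gini criterion (which the test pins down via config.getSplitCriterion()), the gain of a candidate split is conventionally the reduction in Gini impurity from the parent node to its children. The sketch below shows that textbook computation; it is an assumption, not verified here, that it matches KNIME's internally reported gain exactly (the test's expected value was computed externally in LibreOffice Calc).

    // Textbook Gini gain for a multiway split (assumption: matches the gain
    // reported by calcBestSplitClassification under SplitCriterion.Gini).
    // counts[k][c] = number of rows of class c that fall into child k.
    static double giniGain(final int[][] counts) {
        final int nrClasses = counts[0].length;
        final int[] classTotals = new int[nrClasses];
        int total = 0;
        double weightedChildImpurity = 0.0;
        for (final int[] child : counts) {
            int childSize = 0;
            for (int c = 0; c < nrClasses; c++) {
                childSize += child[c];
                classTotals[c] += child[c];
            }
            double childImpurity = 1.0;
            for (int c = 0; c < nrClasses; c++) {
                final double p = childSize == 0 ? 0.0 : (double)child[c] / childSize;
                childImpurity -= p * p;
            }
            weightedChildImpurity += childSize * childImpurity;
            total += childSize;
        }
        double parentImpurity = 1.0;
        for (int c = 0; c < nrClasses; c++) {
            final double p = (double)classTotals[c] / total;
            parentImpurity -= p * p;
        }
        // gain = parent impurity minus the size-weighted average child impurity
        return parentImpurity - weightedChildImpurity / total;
    }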
Use of org.knime.base.node.mine.treeensemble2.data.memberships.DataMemberships in project knime-core by knime.
Class TreeNominalColumnDataTest, method testCalcBestSplitRegressionBinaryXGBoostMissingValueHandling.
/**
 * Tests the XGBoost missing value handling in case of a regression with binary splits.
 *
 * @throws Exception
 */
@Test
public void testCalcBestSplitRegressionBinaryXGBoostMissingValueHandling() throws Exception {
    final TreeEnsembleLearnerConfiguration config = createConfig(true);
    config.setMissingValueHandling(MissingValueHandling.XGBoost);
    final TestDataGenerator dataGen = new TestDataGenerator(config);
    final String noMissingCSV = "A, A, A, B, B, B, B, C, C";
    final String noMissingsTarget = "1, 2, 2, 7, 6, 5, 2, 3, 1";
    TreeNominalColumnData dataCol = dataGen.createNominalAttributeColumn(noMissingCSV, "noMissings", 0);
    TreeTargetNumericColumnData targetCol = TestDataGenerator.createNumericTargetColumn(noMissingsTarget);
    double[] weights = new double[9];
    Arrays.fill(weights, 1.0);
    int[] indices = new int[9];
    for (int i = 0; i < indices.length; i++) {
        indices[i] = i;
    }
    final RandomData rd = config.createRandomData();
    DataMemberships dataMemberships = new MockDataColMem(indices, indices, weights);
    // first test the case that there are no missing values during training
    // (we still need to provide a missing value direction for prediction)
    SplitCandidate split = dataCol.calcBestSplitRegression(dataMemberships, targetCol.getPriors(weights, config), targetCol, rd);
    assertNotNull("SplitCandidate may not be null", split);
    assertThat(split, instanceOf(NominalBinarySplitCandidate.class));
    assertEquals("Wrong gain.", 22.755555, split.getGainValue(), 1e-5);
    assertTrue("No missing values in dataCol therefore the missedRows BitSet must be empty.", split.getMissedRows().isEmpty());
    NominalBinarySplitCandidate nomSplit = (NominalBinarySplitCandidate) split;
    TreeNodeNominalBinaryCondition[] conditions = nomSplit.getChildConditions();
    assertEquals("Binary split candidate must have two children.", 2, conditions.length);
    final String[] values = new String[] { "A", "C" };
    assertArrayEquals("Wrong values in split condition.", values, conditions[0].getValues());
    assertArrayEquals("Wrong values in split condition.", values, conditions[1].getValues());
    assertFalse("Missings should go with majority", conditions[0].acceptsMissings());
    assertTrue("Missings should go with majority", conditions[1].acceptsMissings());
    assertEquals("Wrong set logic.", SetLogic.IS_NOT_IN, conditions[0].getSetLogic());
    assertEquals("Wrong set logic.", SetLogic.IS_IN, conditions[1].getSetLogic());
    // test the case that there are missing values during training
    final String missingCSV = "A, A, A, B, B, B, B, C, C, ?";
    final String missingTarget = "1, 2, 2, 7, 6, 5, 2, 3, 1, 8";
    dataCol = dataGen.createNominalAttributeColumn(missingCSV, "missing", 0);
    targetCol = TestDataGenerator.createNumericTargetColumn(missingTarget);
    weights = new double[10];
    Arrays.fill(weights, 1.0);
    indices = new int[10];
    for (int i = 0; i < indices.length; i++) {
        indices[i] = i;
    }
    dataMemberships = new MockDataColMem(indices, indices, weights);
    split = dataCol.calcBestSplitRegression(dataMemberships, targetCol.getPriors(weights, config), targetCol, rd);
    assertNotNull("SplitCandidate may not be null.", split);
    assertThat(split, instanceOf(NominalBinarySplitCandidate.class));
    assertEquals("Wrong gain.", 36.1, split.getGainValue(), 1e-5);
    assertTrue("Conditions should handle missing values therefore the missedRows BitSet must be empty.", split.getMissedRows().isEmpty());
    nomSplit = (NominalBinarySplitCandidate) split;
    conditions = nomSplit.getChildConditions();
    assertEquals("Binary split candidate must have two children.", 2, conditions.length);
    assertArrayEquals("Wrong values in split condition.", values, conditions[0].getValues());
    assertArrayEquals("Wrong values in split condition.", values, conditions[1].getValues());
    assertTrue("Missings should go with B (because their target values are similar)", conditions[0].acceptsMissings());
    assertFalse("Missings should go with B (because their target values are similar)", conditions[1].acceptsMissings());
    assertEquals("Wrong set logic.", SetLogic.IS_NOT_IN, conditions[0].getSetLogic());
    assertEquals("Wrong set logic.", SetLogic.IS_IN, conditions[1].getSetLogic());
}
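The asserted gains can be reproduced by hand: for these regression splits the gain value behaves like the reduction in the sum of squared deviations (SSD) of the target from parent to children, which is consistent with all gain assertions on this page (22.755555 and 36.1 here, 22.888888 in the multiway test below). A small illustrative check, not part of the test class:

    // SSD = sum over (y_i - mean)^2, via the shortcut sum(y^2) - sum(y)^2 / n
    static double ssd(final double[] y) {
        double sum = 0.0;
        double sumOfSquares = 0.0;
        for (final double v : y) {
            sum += v;
            sumOfSquares += v * v;
        }
        return sumOfSquares - sum * sum / y.length;
    }

    // No-missing case above: parent {1,2,2,7,6,5,2,3,1} has SSD 39.5556;
    // the {A,C} child {1,2,2,3,1} has SSD 2.8, the {B} child {7,6,5,2} has SSD 14.0;
    // 39.5556 - (2.8 + 14.0) = 22.7556, matching the asserted 22.755555.
    // Missing case: parent {1,2,2,7,6,5,2,3,1,8} has SSD 60.1; with the missing row
    // routed to the B side, 60.1 - (2.8 + 21.2) = 36.1, matching the second assertion.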
Use of org.knime.base.node.mine.treeensemble2.data.memberships.DataMemberships in project knime-core by knime.
Class TreeNominalColumnDataTest, method testCalcBestSplitRegressionMultiwayXGBoostMissingValueHandling.
/**
 * This method tests the XGBoost missing value handling in case of a regression task and multiway splits.
 *
 * @throws Exception
 */
@Test
public void testCalcBestSplitRegressionMultiwayXGBoostMissingValueHandling() throws Exception {
    final TreeEnsembleLearnerConfiguration config = createConfig(true);
    config.setMissingValueHandling(MissingValueHandling.XGBoost);
    config.setUseBinaryNominalSplits(false);
    final TestDataGenerator dataGen = new TestDataGenerator(config);
    final String noMissingCSV = "A, A, A, B, B, B, B, C, C";
    final String noMissingsTarget = "1, 2, 2, 7, 6, 5, 2, 3, 1";
    TreeNominalColumnData dataCol = dataGen.createNominalAttributeColumn(noMissingCSV, "noMissings", 0);
    TreeTargetNumericColumnData targetCol = TestDataGenerator.createNumericTargetColumn(noMissingsTarget);
    double[] weights = new double[9];
    Arrays.fill(weights, 1.0);
    int[] indices = new int[9];
    for (int i = 0; i < indices.length; i++) {
        indices[i] = i;
    }
    final RandomData rd = config.createRandomData();
    DataMemberships dataMemberships = new MockDataColMem(indices, indices, weights);
    // first test the case that there are no missing values during training
    // (we still need to provide a missing value direction for prediction)
    SplitCandidate split = dataCol.calcBestSplitRegression(dataMemberships, targetCol.getPriors(weights, config), targetCol, rd);
    assertNotNull("SplitCandidate may not be null", split);
    assertThat(split, instanceOf(NominalMultiwaySplitCandidate.class));
    assertEquals("Wrong gain.", 22.888888, split.getGainValue(), 1e-5);
    assertTrue("No missing values in dataCol therefore the missedRows BitSet must be empty.", split.getMissedRows().isEmpty());
    NominalMultiwaySplitCandidate nomSplit = (NominalMultiwaySplitCandidate) split;
    TreeNodeNominalCondition[] conditions = nomSplit.getChildConditions();
    assertEquals("3 nominal values therefore there must be 3 children.", 3, conditions.length);
    assertEquals("Wrong value.", "A", conditions[0].getValue());
    assertEquals("Wrong value.", "B", conditions[1].getValue());
    assertEquals("Wrong value.", "C", conditions[2].getValue());
    assertFalse("Missings should go with majority", conditions[0].acceptsMissings());
    assertTrue("Missings should go with majority", conditions[1].acceptsMissings());
    assertFalse("Missings should go with majority", conditions[2].acceptsMissings());
    // test the case that there are missing values during training
    final String missingCSV = "A, A, A, B, B, B, B, C, C, ?";
    final String missingTarget = "1, 2, 2, 7, 6, 5, 2, 3, 1, 8";
    dataCol = dataGen.createNominalAttributeColumn(missingCSV, "missing", 0);
    targetCol = TestDataGenerator.createNumericTargetColumn(missingTarget);
    weights = new double[10];
    Arrays.fill(weights, 1.0);
    indices = new int[10];
    for (int i = 0; i < indices.length; i++) {
        indices[i] = i;
    }
    dataMemberships = new MockDataColMem(indices, indices, weights);
    split = dataCol.calcBestSplitRegression(dataMemberships, targetCol.getPriors(weights, config), targetCol, rd);
    assertNotNull("SplitCandidate may not be null.", split);
    assertThat(split, instanceOf(NominalMultiwaySplitCandidate.class));
    // assertEquals("Wrong gain.", 36.233333333, split.getGainValue(), 1e-5);
    assertTrue("Conditions should handle missing values therefore the missedRows BitSet must be empty.", split.getMissedRows().isEmpty());
    nomSplit = (NominalMultiwaySplitCandidate) split;
    conditions = nomSplit.getChildConditions();
    assertEquals("3 values (not counting missing values) therefore there must be 3 children.", 3, conditions.length);
    assertEquals("Wrong value.", "A", conditions[0].getValue());
    assertEquals("Wrong value.", "B", conditions[1].getValue());
    assertEquals("Wrong value.", "C", conditions[2].getValue());
    assertFalse("Missings should go with majority", conditions[0].acceptsMissings());
    assertTrue("Missings should go with majority", conditions[1].acceptsMissings());
    assertFalse("Missings should go with majority", conditions[2].acceptsMissings());
}
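The same SSD bookkeeping from the sketch above covers the multiway gain asserted here: parent SSD 39.5556 minus the child SSDs 0.6667 ({A} = 1,2,2), 14.0 ({B} = 7,6,5,2) and 2.0 ({C} = 3,1) gives 22.8889, matching 22.888888. Separately, this test and the binary variant build the same identity-mapped DataMemberships four times by hand; a hypothetical helper such as the following (name and placement are illustrative, not part of knime-core) would capture that pattern:

    // Hypothetical test helper (illustrative only): a DataMemberships over the
    // first n rows with unit weights and an identity index mapping, exactly what
    // the tests above construct inline via MockDataColMem.
    private static DataMemberships identityMemberships(final int n) {
        final double[] weights = new double[n];
        Arrays.fill(weights, 1.0);
        final int[] indices = new int[n];
        for (int i = 0; i < n; i++) {
            indices[i] = i;
        }
        return new MockDataColMem(indices, indices, weights);
    }

    // usage: DataMemberships dataMemberships = identityMemberships(10);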