Use of org.knime.base.node.mine.treeensemble2.learner.NumericSplitCandidate in project knime-core by knime.
The class TreeNumericColumnDataTest, method testCalcBestSplitClassification.
@Test
public void testCalcBestSplitClassification() throws Exception {
TreeEnsembleLearnerConfiguration config = createConfig();
/* data from J. Fuernkranz, Uni Darmstadt:
* http://www.ke.tu-darmstadt.de/lehre/archiv/ws0809/mldm/dt.pdf */
final double[] data = asDataArray("60,70,75,85, 90, 95, 100,120,125,220");
final String[] target = asStringArray("No,No,No,Yes,Yes,Yes,No, No, No, No");
Pair<TreeOrdinaryNumericColumnData, TreeTargetNominalColumnData> exampleData = exampleData(config, data, target);
RandomData rd = config.createRandomData();
TreeNumericColumnData columnData = exampleData.getFirst();
TreeTargetNominalColumnData targetData = exampleData.getSecond();
assertEquals(SplitCriterion.Gini, config.getSplitCriterion());
double[] rowWeights = new double[data.length];
Arrays.fill(rowWeights, 1.0);
TreeData treeData = createTreeDataClassification(exampleData);
IDataIndexManager indexManager = new DefaultDataIndexManager(treeData);
DataMemberships dataMemberships = new RootDataMemberships(rowWeights, treeData, indexManager);
ClassificationPriors priors = targetData.getDistribution(rowWeights, config);
SplitCandidate splitCandidate = columnData.calcBestSplitClassification(dataMemberships, priors, targetData, rd);
assertNotNull(splitCandidate);
assertThat(splitCandidate, instanceOf(NumericSplitCandidate.class));
assertTrue(splitCandidate.canColumnBeSplitFurther());
// manually verified in LibreOffice Calc
assertEquals(/*0.42 - 0.300 */ 0.12, splitCandidate.getGainValue(), 0.00001);
NumericSplitCandidate numSplitCandidate = (NumericSplitCandidate) splitCandidate;
TreeNodeNumericCondition[] childConditions = numSplitCandidate.getChildConditions();
assertEquals(2, childConditions.length);
assertEquals((95.0 + 100.0) / 2.0, childConditions[0].getSplitValue(), 0.0);
assertEquals((95.0 + 100.0) / 2.0, childConditions[1].getSplitValue(), 0.0);
assertEquals(NumericOperator.LessThanOrEqual, childConditions[0].getNumericOperator());
assertEquals(NumericOperator.LargerThan, childConditions[1].getNumericOperator());
double[] childRowWeights = new double[data.length];
System.arraycopy(rowWeights, 0, childRowWeights, 0, rowWeights.length);
BitSet inChild = columnData.updateChildMemberships(childConditions[0], dataMemberships);
DataMemberships childMemberships = dataMemberships.createChildMemberships(inChild);
ClassificationPriors childTargetPriors = targetData.getDistribution(childMemberships, config);
SplitCandidate splitCandidateChild = columnData.calcBestSplitClassification(childMemberships, childTargetPriors, targetData, rd);
assertNotNull(splitCandidateChild);
assertThat(splitCandidateChild, instanceOf(NumericSplitCandidate.class));
// manually verified in LibreOffice Calc
assertEquals(0.5, splitCandidateChild.getGainValue(), 0.00001);
TreeNodeNumericCondition[] childConditions2 = ((NumericSplitCandidate) splitCandidateChild).getChildConditions();
assertEquals(2, childConditions2.length);
assertEquals((75.0 + 85.0) / 2.0, childConditions2[0].getSplitValue(), 0.0);
System.arraycopy(rowWeights, 0, childRowWeights, 0, rowWeights.length);
inChild = columnData.updateChildMemberships(childConditions[1], dataMemberships);
childMemberships = dataMemberships.createChildMemberships(inChild);
childTargetPriors = targetData.getDistribution(childMemberships, config);
splitCandidateChild = columnData.calcBestSplitClassification(childMemberships, childTargetPriors, targetData, rd);
assertNull(splitCandidateChild);
}
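
The asserted gains can be reproduced by hand from the Gini impurities. The following standalone sketch is not part of knime-core; the GiniGainCheck class and its helpers are purely illustrative. It recomputes the root gain for the split at (95+100)/2 = 97.5 (0.42 - 0.30 = 0.12) and the gain of the left child's split at (75+85)/2 = 80.0 (0.5), matching the values asserted above.

// Standalone verification sketch (illustrative only, not part of knime-core).
public class GiniGainCheck {

    /** Gini impurity of a vector of class counts. */
    static double gini(final int... classCounts) {
        final int total = java.util.Arrays.stream(classCounts).sum();
        double impurity = 1.0;
        for (int count : classCounts) {
            final double p = (double)count / total;
            impurity -= p * p;
        }
        return impurity;
    }

    /** Gain = parent impurity minus the weighted impurities of the two children. */
    static double giniGain(final int[] parent, final int[] left, final int[] right) {
        final int nParent = java.util.Arrays.stream(parent).sum();
        final int nLeft = java.util.Arrays.stream(left).sum();
        final int nRight = java.util.Arrays.stream(right).sum();
        return gini(parent) - ((double)nLeft / nParent) * gini(left) - ((double)nRight / nParent) * gini(right);
    }

    public static void main(final String[] args) {
        // root: 7x "No", 3x "Yes"; split at (95+100)/2 = 97.5
        // left (values <= 97.5): 3x "No", 3x "Yes"; right: 4x "No"
        System.out.println(giniGain(new int[]{7, 3}, new int[]{3, 3}, new int[]{4, 0})); // 0.42 - 0.30 = 0.12
        // left child (3x "No", 3x "Yes"); split at (75+85)/2 = 80.0 yields two pure children
        System.out.println(giniGain(new int[]{3, 3}, new int[]{3, 0}, new int[]{0, 3})); // 0.5 - 0.0 = 0.5
    }
}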
Use of org.knime.base.node.mine.treeensemble2.learner.NumericSplitCandidate in project knime-core by knime.
The class TreeNumericColumnDataTest, method testCalcBestSplitClassificationSplitAtEnd.
/**
* Tests a split at the last possible split position, even if no change in the target can be observed; see the
* example data in the method body.
* @throws Exception
*/
@Test
public void testCalcBestSplitClassificationSplitAtEnd() throws Exception {
// Index: 1 2 3 4 5 6 7 8
// Value: 1 1|2 2 2|3 3 3
// Target: A A|A A A|A A B
double[] data = asDataArray("1,1,2,2,2,3,3,3");
String[] target = asStringArray("A,A,A,A,A,A,A,B");
TreeEnsembleLearnerConfiguration config = createConfig();
RandomData rd = config.createRandomData();
Pair<TreeOrdinaryNumericColumnData, TreeTargetNominalColumnData> exampleData = exampleData(config, data, target);
TreeNumericColumnData columnData = exampleData.getFirst();
TreeTargetNominalColumnData targetData = exampleData.getSecond();
double[] rowWeights = new double[data.length];
Arrays.fill(rowWeights, 1.0);
TreeData treeData = createTreeDataClassification(exampleData);
IDataIndexManager indexManager = new DefaultDataIndexManager(treeData);
DataMemberships dataMemberships = new RootDataMemberships(rowWeights, treeData, indexManager);
ClassificationPriors priors = targetData.getDistribution(rowWeights, config);
SplitCandidate splitCandidate = columnData.calcBestSplitClassification(dataMemberships, priors, targetData, rd);
assertNotNull(splitCandidate);
assertThat(splitCandidate, instanceOf(NumericSplitCandidate.class));
assertTrue(splitCandidate.canColumnBeSplitFurther());
// manually calculated
assertEquals(/*0.21875 - 0.166666667 */ 0.05208, splitCandidate.getGainValue(), 0.001);
NumericSplitCandidate numSplitCandidate = (NumericSplitCandidate) splitCandidate;
TreeNodeNumericCondition[] childConditions = numSplitCandidate.getChildConditions();
assertEquals(2, childConditions.length);
assertEquals((2.0 + 3.0) / 2.0, childConditions[0].getSplitValue(), 0.0);
assertEquals(NumericOperator.LessThanOrEqual, childConditions[0].getNumericOperator());
double[] childRowWeights = new double[data.length];
System.arraycopy(rowWeights, 0, childRowWeights, 0, rowWeights.length);
BitSet inChild = columnData.updateChildMemberships(childConditions[0], dataMemberships);
DataMemberships childMemberships = dataMemberships.createChildMemberships(inChild);
ClassificationPriors childTargetPriors = targetData.getDistribution(childMemberships, config);
SplitCandidate splitCandidateChild = columnData.calcBestSplitClassification(childMemberships, childTargetPriors, targetData, rd);
assertNull(splitCandidateChild);
System.arraycopy(rowWeights, 0, childRowWeights, 0, rowWeights.length);
inChild = columnData.updateChildMemberships(childConditions[1], dataMemberships);
childMemberships = dataMemberships.createChildMemberships(inChild);
childTargetPriors = targetData.getDistribution(childMemberships, config);
splitCandidateChild = columnData.calcBestSplitClassification(childMemberships, childTargetPriors, targetData, null);
assertNull(splitCandidateChild);
}
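
The expected gain of about 0.05208 follows from the same arithmetic. A minimal verification sketch, with the class counts hard-coded from the example data above (illustrative, not part of the test):

public class SplitAtEndGainCheck {
    public static void main(final String[] args) {
        // root: 7x "A", 1x "B"; split at (2+3)/2 = 2.5
        // left (values <= 2.5): 5x "A" (pure); right: 2x "A", 1x "B"
        final double rootGini = 1.0 - Math.pow(7.0 / 8.0, 2) - Math.pow(1.0 / 8.0, 2); // 0.21875
        final double rightGini = 1.0 - Math.pow(2.0 / 3.0, 2) - Math.pow(1.0 / 3.0, 2); // 4/9
        final double gain = rootGini - (5.0 / 8.0) * 0.0 - (3.0 / 8.0) * rightGini;
        System.out.println(gain); // 0.21875 - 0.166666... = 0.052083...
    }
}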
Use of org.knime.base.node.mine.treeensemble2.learner.NumericSplitCandidate in project knime-core by knime.
The class TreeNumericColumnDataTest, method testCalcBestSplitClassificationSplitAtStart.
/**
* Tests a split at the first possible split position, even if no change in the target can be observed; see the
* example data in the method body.
* @throws Exception
*/
@Test
public void testCalcBestSplitClassificationSplitAtStart() throws Exception {
// Index: 1 2 3 4 5 6 7
// Value: 1 1 1|2 2|3 3
// Target: A A A|A B|A B
double[] data = asDataArray("1,1,1,2,2,3,3");
String[] target = asStringArray("A,A,A,A,B,A,B");
TreeEnsembleLearnerConfiguration config = createConfig();
Pair<TreeOrdinaryNumericColumnData, TreeTargetNominalColumnData> exampleData = exampleData(config, data, target);
TreeNumericColumnData columnData = exampleData.getFirst();
TreeTargetNominalColumnData targetData = exampleData.getSecond();
double[] rowWeights = new double[data.length];
Arrays.fill(rowWeights, 1.0);
TreeData treeData = createTreeDataClassification(exampleData);
IDataIndexManager indexManager = new DefaultDataIndexManager(treeData);
DataMemberships dataMemberships = new RootDataMemberships(rowWeights, treeData, indexManager);
ClassificationPriors priors = targetData.getDistribution(rowWeights, config);
RandomData rd = config.createRandomData();
SplitCandidate splitCandidate = columnData.calcBestSplitClassification(dataMemberships, priors, targetData, rd);
double gain = (1.0 - Math.pow(5.0 / 7.0, 2.0) - Math.pow(2.0 / 7.0, 2.0)) - 0.0 - 4.0 / 7.0 * (1.0 - Math.pow(2.0 / 4.0, 2.0) - Math.pow(2.0 / 4.0, 2.0));
// manually calculated
assertEquals(gain, splitCandidate.getGainValue(), 0.000001);
NumericSplitCandidate numSplitCandidate = (NumericSplitCandidate) splitCandidate;
TreeNodeNumericCondition[] childConditions = numSplitCandidate.getChildConditions();
assertEquals(2, childConditions.length);
assertEquals((1.0 + 2.0) / 2.0, childConditions[0].getSplitValue(), 0.0);
}
Use of org.knime.base.node.mine.treeensemble2.learner.NumericSplitCandidate in project knime-core by knime.
The class SurrogatesTest, method createSplitCandidates.
private static SplitCandidate[] createSplitCandidates(final TreeEnsembleLearnerConfiguration config) {
double[] colBestData = TreeNumericColumnDataTest.asDataArray(COLBEST);
double[] col1Data = TreeNumericColumnDataTest.asDataArray(COL1);
double[] col2Data = TreeNumericColumnDataTest.asDataArray(COL2);
TreeOrdinaryNumericColumnData colBest = TreeNumericColumnDataTest.createNumericColumnData(config, colBestData, "colBest", 0);
colBest.getMetaData().setAttributeIndex(0);
TreeOrdinaryNumericColumnData col1 = TreeNumericColumnDataTest.createNumericColumnData(config, col1Data, "col1", 1);
col1.getMetaData().setAttributeIndex(1);
TreeOrdinaryNumericColumnData col2 = TreeNumericColumnDataTest.createNumericColumnData(config, col2Data, "col2", 2);
col2.getMetaData().setAttributeIndex(2);
NumericSplitCandidate splitBest = new NumericSplitCandidate(colBest, SPLITVAL, 0.5, createMissingBitSet(colBestData), NumericSplitCandidate.NO_MISSINGS);
NumericSplitCandidate split1 = new NumericSplitCandidate(col1, SPLITVAL, 0.25, createMissingBitSet(col1Data), NumericSplitCandidate.NO_MISSINGS);
NumericSplitCandidate split2 = new NumericSplitCandidate(col2, SPLITVAL, 0.1, createMissingBitSet(col2Data), NumericSplitCandidate.NO_MISSINGS);
return new SplitCandidate[] { splitBest, split1, split2 };
}
Use of org.knime.base.node.mine.treeensemble2.learner.NumericSplitCandidate in project knime-core by knime.
The class TreeNumericColumnData, method calcBestSplitRegression.
@Override
public SplitCandidate calcBestSplitRegression(final DataMemberships dataMemberships, final RegressionPriors targetPriors, final TreeTargetNumericColumnData targetColumn, final RandomData rd) {
final TreeEnsembleLearnerConfiguration config = getConfiguration();
final boolean useAverageSplitPoints = config.isUseAverageSplitPoints();
final int minChildNodeSize = config.getMinChildSize();
// get columnMemberships
final ColumnMemberships columnMemberships = dataMemberships.getColumnMemberships(getMetaData().getAttributeIndex());
final int lengthNonMissing = getLengthNonMissing();
// missing value handling
final boolean useXGBoostMissingValueHandling = config.getMissingValueHandling() == MissingValueHandling.XGBoost;
// are there missing values in this column (complete column)
boolean branchContainsMissingValues = containsMissingValues();
boolean missingsGoLeft = true;
double missingWeight = 0.0;
double missingY = 0.0;
// check if there are missing values in this rowsample
if (branchContainsMissingValues) {
columnMemberships.goToLast();
while (columnMemberships.getIndexInColumn() >= lengthNonMissing) {
missingWeight += columnMemberships.getRowWeight();
missingY += targetColumn.getValueFor(columnMemberships.getOriginalIndex());
if (!columnMemberships.previous()) {
break;
}
}
columnMemberships.reset();
branchContainsMissingValues = missingWeight > 0.0;
}
final double ySumTotal = targetPriors.getYSum() - missingY;
final double nrRecordsTotal = targetPriors.getNrRecords() - missingWeight;
final double criterionTotal = useXGBoostMissingValueHandling ? (ySumTotal + missingY) * (ySumTotal + missingY) / (nrRecordsTotal + missingWeight) : ySumTotal * ySumTotal / nrRecordsTotal;
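// Note: criterionTotal is the parent's (sum of targets)^2 / (record count) term (including the missing
// rows when XGBoost missing value handling is active). Since the sum of squared targets is constant
// within the branch, maximizing the children's combined (sum)^2 / n against this baseline is equivalent
// to minimizing the squared error of the split, i.e. maximal variance reduction.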
double ySumLeft = 0.0;
double nrRecordsLeft = 0.0;
double ySumRight = ySumTotal;
double nrRecordsRight = nrRecordsTotal;
// all values in the current branch are missing
if (nrRecordsRight == 0) {
// it is impossible to determine a split
return null;
}
double bestSplit = Double.NEGATIVE_INFINITY;
double bestImprovement = 0.0;
double lastSeenY = Double.NaN;
double lastSeenValue = Double.NEGATIVE_INFINITY;
double lastSeenWeight = -1.0;
// compute the gain, keep the one that maximizes the split
while (columnMemberships.next()) {
final double weight = columnMemberships.getRowWeight();
if (weight < EPSILON) {
// ignore record: not in current branch or not in sample
continue;
} else if (Math.floor(weight) != weight) {
throw new UnsupportedOperationException("weighted records (missing values?) not supported, " + "weight is " + weight);
}
final double value = getSorted(columnMemberships.getIndexInColumn());
if (lastSeenWeight > 0.0) {
ySumLeft += lastSeenWeight * lastSeenY;
ySumRight -= lastSeenWeight * lastSeenY;
nrRecordsLeft += lastSeenWeight;
nrRecordsRight -= lastSeenWeight;
if (nrRecordsLeft >= minChildNodeSize && nrRecordsRight >= minChildNodeSize && lastSeenValue < value) {
boolean tempMissingsGoLeft = true;
double childrenSquaredSum;
if (branchContainsMissingValues && useXGBoostMissingValueHandling) {
final double[] tempChildrenSquaredSum = new double[2];
tempChildrenSquaredSum[0] = ((ySumLeft + missingY) * (ySumLeft + missingY) / (nrRecordsLeft + missingWeight)) + (ySumRight * ySumRight / nrRecordsRight);
tempChildrenSquaredSum[1] = (ySumLeft * ySumLeft / nrRecordsLeft) + ((ySumRight + missingY) * (ySumRight + missingY) / (nrRecordsRight + missingWeight));
if (tempChildrenSquaredSum[0] >= tempChildrenSquaredSum[1]) {
childrenSquaredSum = tempChildrenSquaredSum[0];
tempMissingsGoLeft = true;
} else {
childrenSquaredSum = tempChildrenSquaredSum[1];
tempMissingsGoLeft = false;
}
} else {
childrenSquaredSum = (ySumLeft * ySumLeft / nrRecordsLeft) + (ySumRight * ySumRight / nrRecordsRight);
}
double criterion = childrenSquaredSum - criterionTotal;
boolean randomTieBreaker = criterion == bestImprovement ? rd.nextInt(0, 1) == 1 : false;
if (criterion > bestImprovement || randomTieBreaker) {
bestImprovement = criterion;
bestSplit = useAverageSplitPoints ? getCenter(lastSeenValue, value) : lastSeenValue;
// if there are no missing values go with majority
missingsGoLeft = branchContainsMissingValues ? tempMissingsGoLeft : nrRecordsLeft >= nrRecordsRight;
}
}
}
lastSeenY = targetColumn.getValueFor(columnMemberships.getOriginalIndex());
lastSeenValue = value;
lastSeenWeight = weight;
}
// + " but was " + lastSeenY * lastSeenWeight;
if (bestImprovement > 0.0) {
if (useXGBoostMissingValueHandling) {
// return new NumericMissingSplitCandidate(this, bestSplit, bestImprovement, missingsGoLeft);
return new NumericSplitCandidate(this, bestSplit, bestImprovement, new BitSet(), missingsGoLeft ? NumericSplitCandidate.MISSINGS_GO_LEFT : NumericSplitCandidate.MISSINGS_GO_RIGHT);
}
return new NumericSplitCandidate(this, bestSplit, bestImprovement, getMissedRows(columnMemberships), NumericSplitCandidate.NO_MISSINGS);
} else {
return null;
}
}
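
Stripped of column memberships, row weights, missing value handling, and the random tie breaker, the criterion above amounts to maximizing sumLeft^2/nLeft + sumRight^2/nRight - sumTotal^2/nTotal over the candidate split positions of a sorted column. A minimal standalone sketch of that core idea (names and the example data are illustrative, not KNIME API):

import java.util.Arrays;

// Minimal sketch of the core regression split criterion used above: pick the split position
// that maximizes sumLeft^2/nLeft + sumRight^2/nRight - sumTotal^2/nTotal. Assumes a pre-sorted
// column aligned with its target values; no weights, no missing values, no minimum child size.
public class RegressionSplitSketch {

    static double bestSplit(final double[] sortedValues, final double[] targets) {
        final int n = sortedValues.length;
        final double sumTotal = Arrays.stream(targets).sum();
        final double criterionTotal = sumTotal * sumTotal / n;
        double sumLeft = 0.0;
        double bestImprovement = 0.0;
        double bestSplit = Double.NEGATIVE_INFINITY;
        for (int i = 0; i < n - 1; i++) {
            sumLeft += targets[i];
            if (sortedValues[i] == sortedValues[i + 1]) {
                continue; // only split between distinct attribute values
            }
            final double sumRight = sumTotal - sumLeft;
            final int nLeft = i + 1;
            final int nRight = n - nLeft;
            final double criterion = sumLeft * sumLeft / nLeft + sumRight * sumRight / nRight - criterionTotal;
            if (criterion > bestImprovement) {
                bestImprovement = criterion;
                bestSplit = (sortedValues[i] + sortedValues[i + 1]) / 2.0; // average split point
            }
        }
        return bestSplit;
    }

    public static void main(final String[] args) {
        final double[] values = {1, 2, 3, 4, 5, 6};
        final double[] targets = {1.0, 1.1, 0.9, 5.0, 5.2, 4.8};
        System.out.println(bestSplit(values, targets)); // 3.5
    }
}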