Use of org.knime.core.data.DataRow in project knime-core by knime.
Class TreeEnsembleClassificationPredictorCellFactory, method getCells:
/**
* {@inheritDoc}
*/
@Override
public DataCell[] getCells(final DataRow row) {
    TreeEnsembleModelPortObject modelObject = m_predictor.getModelObject();
    TreeEnsemblePredictorConfiguration cfg = m_predictor.getConfiguration();
    final TreeEnsembleModel ensembleModel = modelObject.getEnsembleModel();
    int size = 1;
    final boolean appendConfidence = cfg.isAppendPredictionConfidence();
    if (appendConfidence) {
        size += 1;
    }
    final boolean appendClassConfidences = cfg.isAppendClassConfidences();
    if (appendClassConfidences) {
        size += m_targetValueMap.size();
    }
    final boolean appendModelCount = cfg.isAppendModelCount();
    if (appendModelCount) {
        size += 1;
    }
    final boolean hasOutOfBagFilter = m_predictor.hasOutOfBagFilter();
    DataCell[] result = new DataCell[size];
    DataRow filterRow = new FilterColumnRow(row, m_learnColumnInRealDataIndices);
    PredictorRecord record = ensembleModel.createPredictorRecord(filterRow, m_learnSpec);
    if (record == null) {
        // missing value
        Arrays.fill(result, DataType.getMissingCell());
        return result;
    }
    final Voting voting = m_votingFactory.createVoting();
    final int nrModels = ensembleModel.getNrModels();
    int nrValidModels = 0;
    for (int i = 0; i < nrModels; i++) {
        if (hasOutOfBagFilter && m_predictor.isRowPartOfTrainingData(row.getKey(), i)) {
            // ignore, row was used to train the model
        } else {
            TreeModelClassification m = ensembleModel.getTreeModelClassification(i);
            TreeNodeClassification match = m.findMatchingNode(record);
            voting.addVote(match);
            nrValidModels += 1;
        }
    }
    final NominalValueRepresentation[] targetVals =
        ((TreeTargetNominalColumnMetaData) ensembleModel.getMetaData().getTargetMetaData()).getValues();
    String majorityClass = voting.getMajorityClass();
    int index = 0;
    if (majorityClass == null) {
        assert nrValidModels == 0;
        Arrays.fill(result, DataType.getMissingCell());
        index = size - 1;
    } else {
        result[index++] = m_targetValueMap.get(majorityClass);
        // final float[] distribution = voting.getClassProbabilities();
        if (appendConfidence) {
            result[index++] = new DoubleCell(voting.getClassProbabilityForClass(majorityClass));
        }
        if (appendClassConfidences) {
            for (String targetValue : m_targetValueMap.keySet()) {
                result[index++] = new DoubleCell(voting.getClassProbabilityForClass(targetValue));
            }
        }
    }
    if (appendModelCount) {
        result[index++] = new IntCell(voting.getNrVotes());
    }
    return result;
}
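The Voting and VotingFactory classes referenced above are not shown on this page. Depending on the factory, the vote can be a hard (majority) or a soft (probability-weighted) vote; the following standalone sketch shows only the plain majority-vote idea with ordinary Java collections. All names in it are illustrative and are not KNIME API.

import java.util.HashMap;
import java.util.Map;

// Illustrative only: counts one vote per tree and returns the majority class,
// mirroring what Voting#addVote and Voting#getMajorityClass do in the snippet above.
final class MajorityVote {

    private final Map<String, Integer> m_votes = new HashMap<>();

    void addVote(final String predictedClass) {
        m_votes.merge(predictedClass, 1, Integer::sum);
    }

    String getMajorityClass() {
        String best = null;
        int bestCount = -1;
        for (Map.Entry<String, Integer> e : m_votes.entrySet()) {
            if (e.getValue() > bestCount) {
                bestCount = e.getValue();
                best = e.getKey();
            }
        }
        return best; // null if no model voted, e.g. all trees were filtered as out-of-bag
    }

    int getNrVotes() {
        int sum = 0;
        for (int count : m_votes.values()) {
            sum += count;
        }
        return sum;
    }
}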
Use of org.knime.core.data.DataRow in project knime-core by knime.
Class GradientBoostingPredictorCellFactory, method getCell:
/**
* {@inheritDoc}
*/
@Override
public DataCell getCell(final DataRow row) {
    DataRow filterRow = new FilterColumnRow(row, m_learnColumnInRealDataIndices);
    double prediction = m_model.predict(m_model.createPredictorRecord(filterRow, m_learnSpec));
    return new DoubleCell(prediction);
}
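The actual regression prediction happens inside m_model.predict(...). As a hedged sketch, a gradient-boosted prediction generally amounts to an initial constant plus learning-rate-weighted outputs of the individual regression trees; the KNIME implementation may fold the learning rate into its stored coefficients, and none of the names below are KNIME API.

// Hypothetical helper illustrating the generic gradient-boosting prediction formula:
// prediction = initialValue + learningRate * sum of the individual tree outputs.
// None of these names correspond to KNIME classes or methods.
static double boostedPrediction(final double initialValue, final double learningRate,
    final double[] treeOutputsForRow) {
    double prediction = initialValue;
    for (double treeOutput : treeOutputsForRow) {
        prediction += learningRate * treeOutput;
    }
    return prediction;
}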
Use of org.knime.core.data.DataRow in project knime-core by knime.
Class TreeEnsembleClassificationPredictorCellFactory2, method getCells:
/**
* {@inheritDoc}
*/
@Override
public DataCell[] getCells(final DataRow row) {
    TreeEnsembleModelPortObject modelObject = m_predictor.getModelObject();
    TreeEnsemblePredictorConfiguration cfg = m_predictor.getConfiguration();
    final TreeEnsembleModel ensembleModel = modelObject.getEnsembleModel();
    int size = 1;
    final boolean appendConfidence = cfg.isAppendPredictionConfidence();
    if (appendConfidence) {
        size += 1;
    }
    final boolean appendClassConfidences = cfg.isAppendClassConfidences();
    if (appendClassConfidences) {
        size += m_targetValueMap.size();
    }
    final boolean appendModelCount = cfg.isAppendModelCount();
    if (appendModelCount) {
        size += 1;
    }
    final boolean hasOutOfBagFilter = m_predictor.hasOutOfBagFilter();
    DataCell[] result = new DataCell[size];
    DataRow filterRow = new FilterColumnRow(row, m_learnColumnInRealDataIndices);
    PredictorRecord record = ensembleModel.createPredictorRecord(filterRow, m_learnSpec);
    if (record == null) {
        // missing value
        Arrays.fill(result, DataType.getMissingCell());
        return result;
    }
    OccurrenceCounter<String> counter = new OccurrenceCounter<String>();
    final int nrModels = ensembleModel.getNrModels();
    TreeTargetNominalColumnMetaData targetMeta =
        (TreeTargetNominalColumnMetaData) ensembleModel.getMetaData().getTargetMetaData();
    final double[] classProbabilities = new double[targetMeta.getValues().length];
    int nrValidModels = 0;
    for (int i = 0; i < nrModels; i++) {
        if (hasOutOfBagFilter && m_predictor.isRowPartOfTrainingData(row.getKey(), i)) {
            // ignore, row was used to train the model
        } else {
            TreeModelClassification m = ensembleModel.getTreeModelClassification(i);
            TreeNodeClassification match = m.findMatchingNode(record);
            String majorityClassName = match.getMajorityClassName();
            final float[] nodeClassProbs = match.getTargetDistribution();
            double instancesInNode = 0;
            for (int c = 0; c < nodeClassProbs.length; c++) {
                instancesInNode += nodeClassProbs[c];
            }
            for (int c = 0; c < classProbabilities.length; c++) {
                classProbabilities[c] += nodeClassProbs[c] / instancesInNode;
            }
            counter.add(majorityClassName);
            nrValidModels += 1;
        }
    }
    String bestValue = counter.getMostFrequent();
    int index = 0;
    if (bestValue == null) {
        assert nrValidModels == 0;
        Arrays.fill(result, DataType.getMissingCell());
        index = size - 1;
    } else {
        // result[index++] = m_targetValueMap.get(bestValue);
        int indexBest = -1;
        double probBest = -1;
        for (int c = 0; c < classProbabilities.length; c++) {
            double prob = classProbabilities[c];
            if (prob > probBest) {
                probBest = prob;
                indexBest = c;
            }
        }
        result[index++] = new StringCell(targetMeta.getValues()[indexBest].getNominalValue());
        if (appendConfidence) {
            // final int freqValue = counter.getFrequency(bestValue);
            // result[index++] = new DoubleCell(freqValue / (double)nrValidModels);
            result[index++] = new DoubleCell(probBest);
        }
        if (appendClassConfidences) {
            for (NominalValueRepresentation nomVal : targetMeta.getValues()) {
                double prob = classProbabilities[nomVal.getAssignedInteger()] / nrValidModels;
                result[index++] = new DoubleCell(prob);
            }
        }
    }
    if (appendModelCount) {
        result[index++] = new IntCell(nrValidModels);
    }
    return result;
}
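Unlike the first factory, this variant performs soft voting: each matching leaf's target distribution is normalized by the number of training instances in that leaf, the normalized distributions are summed per class, and the class with the largest accumulated probability wins. The following standalone sketch isolates that step with illustrative names; like the code above, it assumes every leaf contains at least one training instance.

// Illustrative soft voting: each entry of leafDistributions holds one tree's leaf
// target distribution (training-instance counts per class). Each distribution is
// normalized per tree, summed per class, and the argmax over the accumulated
// probabilities is returned, mirroring the loops in getCells above.
static int softVoteArgMax(final float[][] leafDistributions, final int nrClasses) {
    final double[] classProbabilities = new double[nrClasses];
    for (float[] dist : leafDistributions) {
        double instancesInNode = 0;
        for (float count : dist) {
            instancesInNode += count;
        }
        for (int c = 0; c < nrClasses; c++) {
            classProbabilities[c] += dist[c] / instancesInNode;
        }
    }
    int indexBest = -1;
    double probBest = -1;
    for (int c = 0; c < nrClasses; c++) {
        if (classProbabilities[c] > probBest) {
            probBest = classProbabilities[c];
            indexBest = c;
        }
    }
    return indexBest;
}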
Use of org.knime.core.data.DataRow in project knime-core by knime.
Class RuleNodeFactory, method like:
/**
* Returns a new like node that tries to match a wildcard expression in a
* column to a fixed string value.
*
* @param value a fixed value
* @param col the column's index whose contents are interpreted as wildcard
* patterns
*
* @return a new like node
*/
public static RuleNode like(final String value, final int col) {
    return new RuleNode() {
        @Override
        public boolean evaluate(final DataRow row) {
            DataCell c = row.getCell(col);
            if (c.isMissing()) {
                return false;
            }
            String regex = WildcardMatcher.wildcardToRegex(c.toString());
            return value.matches(regex);
        }

        /**
         * {@inheritDoc}
         */
        @Override
        public String toString() {
            return " \"" + value + "\" " + Operators.LIKE + "$" + col + "$";
        }
    };
}
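Note the direction of the match: the cell content is interpreted as the wildcard pattern and the fixed value is matched against it. A minimal sketch of a wildcard-to-regex conversion follows; the real WildcardMatcher.wildcardToRegex may handle escaping and additional metacharacters differently, so this is only an illustration.

import java.util.regex.Pattern;

// Illustrative conversion of a simple wildcard pattern ('*' and '?') to a regular
// expression. The real WildcardMatcher may treat escaping and other characters differently.
static String wildcardToRegexSketch(final String wildcard) {
    StringBuilder regex = new StringBuilder();
    for (char ch : wildcard.toCharArray()) {
        if (ch == '*') {
            regex.append(".*");
        } else if (ch == '?') {
            regex.append('.');
        } else {
            regex.append(Pattern.quote(String.valueOf(ch)));
        }
    }
    return regex.toString();
}

// Example: a cell containing the pattern "Tree*" would match the fixed value
// "TreeEnsemble", because "TreeEnsemble".matches(wildcardToRegexSketch("Tree*")) is true.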
Use of org.knime.core.data.DataRow in project knime-core by knime.
Class MissingValueHandling2TableIterator, method next:
/**
* {@inheritDoc}
*/
@Override
public DataRow next() {
    if (!hasNext()) {
        throw new NoSuchElementException();
    }
    DataRow result = m_next;
    push();
    return result;
}
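This is the usual look-ahead iterator pattern: push() pre-computes the following row into m_next, so hasNext() only needs to test that field and next() can hand out the pre-fetched row. A generic, lazy variant of the same idea (illustrative code, not the KNIME class) could look like this:

import java.util.Iterator;
import java.util.NoSuchElementException;

// Generic look-ahead iterator sketch: computeNext() plays the role of push() in the
// KNIME code above, pre-fetching the element that the following next() call returns.
abstract class LookAheadIterator<T> implements Iterator<T> {

    private T m_next = null;

    /** @return the next element, or null once the underlying source is exhausted. */
    protected abstract T computeNext();

    @Override
    public boolean hasNext() {
        if (m_next == null) {
            m_next = computeNext();
        }
        return m_next != null;
    }

    @Override
    public T next() {
        if (!hasNext()) {
            throw new NoSuchElementException();
        }
        T result = m_next;
        m_next = null; // re-filled lazily by the following hasNext() call
        return result;
    }
}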