Use of org.kie.kogito.explainability.model.Type in project kogito-apps by kiegroup: class DataUtilsTest, method testDropLinearizedFeature.
@Test
void testDropLinearizedFeature() {
    // Exercise dropOnLinearizedFeatures for every supported feature type:
    // a target nested inside a composite must produce a feature different from the source.
    for (Type featureType : Type.values()) {
        Feature toDrop = TestUtils.getMockedFeature(featureType, new Value(1d));
        List<Feature> children = new LinkedList<>();
        children.add(TestUtils.getMockedNumericFeature());
        children.add(toDrop);
        children.add(TestUtils.getMockedTextFeature("foo bar"));
        children.add(TestUtils.getMockedNumericFeature());
        Feature composite = FeatureFactory.newCompositeFeature("composite", children);
        Feature dropped = DataUtils.dropOnLinearizedFeatures(toDrop, composite);
        assertNotEquals(composite, dropped);
    }
}
Use of org.kie.kogito.explainability.model.Type in project kogito-apps by kiegroup: class DataUtilsTest, method testDropFeature.
@Test
void testDropFeature() {
    // For every feature type, dropping a target contained in a feature list
    // must return a list that no longer equals the original one.
    for (Type featureType : Type.values()) {
        Feature toDrop = TestUtils.getMockedFeature(featureType, new Value(1d));
        List<Feature> original = new LinkedList<>();
        original.add(TestUtils.getMockedNumericFeature());
        original.add(toDrop);
        original.add(TestUtils.getMockedTextFeature("foo bar"));
        original.add(TestUtils.getMockedNumericFeature());
        List<Feature> afterDrop = DataUtils.dropFeature(original, toDrop);
        assertNotEquals(original, afterDrop);
    }
}
Use of org.kie.kogito.explainability.model.Type in project kogito-apps by kiegroup: class DataUtils, method dropFeature.
/**
 * Drop a given feature from a list of existing features.
 *
 * @param features the existing features
 * @param target the feature to drop
 * @return a new list of features having the target feature dropped
 */
public static List<Feature> dropFeature(List<Feature> features, Feature target) {
    List<Feature> result = new ArrayList<>(features.size());
    String targetName = target.getName();
    for (Feature current : features) {
        String currentName = current.getName();
        Type currentType = current.getType();
        Value currentValue = current.getValue();
        Feature replacement;
        if (targetName.equals(currentName)) {
            if (target.getType().equals(currentType) && target.getValue().equals(currentValue)) {
                // exact match on name, type and value: drop the value according to its type
                replacement = FeatureFactory.copyOf(current, currentType.drop(currentValue));
            } else {
                // same name but different type/value: drop via the linearized representation
                replacement = dropOnLinearizedFeatures(target, current);
            }
        } else if (Type.COMPOSITE.equals(currentType)) {
            // recurse into composite features, which nest a list of sub-features
            List<Feature> nested = (List<Feature>) currentValue.getUnderlyingObject();
            replacement = FeatureFactory.newCompositeFeature(currentName, dropFeature(nested, target));
        } else {
            // target not found here: keep an unchanged copy
            replacement = FeatureFactory.copyOf(current, currentValue);
        }
        result.add(replacement);
    }
    return result;
}
Use of org.kie.kogito.explainability.model.Type in project kogito-apps by kiegroup: class ExplainabilityMetrics, method classificationFidelity.
/**
 * Calculate fidelity (accuracy) of boolean classification outputs using saliency predictor function = sign(sum(saliency.scores))
 * See papers:
 * - Guidotti Riccardo, et al. "A survey of methods for explaining black box models." ACM computing surveys (2018).
 * - Bodria, Francesco, et al. "Explainability Methods for Natural Language Processing: Applications to Sentiment Analysis (Discussion Paper)."
 *
 * @param pairs pairs composed by the saliency and the related prediction
 * @return the fidelity accuracy, or {@code 0} when no boolean output was evaluated
 */
public static double classificationFidelity(List<Pair<Saliency, Prediction>> pairs) {
    // integer tallies: these only ever count whole evaluations
    int hits = 0;
    int evals = 0;
    for (Pair<Saliency, Prediction> pair : pairs) {
        Saliency saliency = pair.getLeft();
        Prediction prediction = pair.getRight();
        for (Output output : prediction.getOutput().getOutputs()) {
            if (Type.BOOLEAN.equals(output.getType())) {
                // surrogate predictor: sign of the summed per-feature saliency scores.
                // mapToDouble directly avoids the boxing round-trip of map(..).mapToDouble(d -> d)
                double predictorOutput = saliency.getPerFeatureImportance().stream()
                        .mapToDouble(FeatureImportance::getScore).sum();
                double v = output.getValue().asNumber();
                // count a hit when the predicted sign agrees with the actual output sign
                if ((v >= 0 && predictorOutput >= 0) || (v < 0 && predictorOutput < 0)) {
                    hits++;
                }
                evals++;
            }
        }
    }
    return evals == 0 ? 0 : (double) hits / evals;
}
Use of org.kie.kogito.explainability.model.Type in project kogito-apps by kiegroup: class PartialDependencePlotExplainer, method collapseMarginalImpacts.
/**
 * Collapse value counts into marginal impacts.
 * For numbers ({@code Type.NUMBER.equals(type))} this is just the average of each value at each feature value.
 * For all other types the final {@link Value} is just the most frequent.
 *
 * @param valueCounts the frequency of each value at each position
 * @param type the type of the output
 * @return the marginal impacts
 */
private List<Value> collapseMarginalImpacts(List<Map<Value, Long>> valueCounts, Type type) {
    List<Value> marginalImpacts = new ArrayList<>();
    if (Type.NUMBER.equals(type)) {
        // weighted average: each observed value contributes proportionally
        // to its frequency over the configured series length
        for (Map<Value, Long> counts : valueCounts) {
            double weightedSum = 0;
            for (Map.Entry<Value, Long> entry : counts.entrySet()) {
                weightedSum += entry.getKey().asNumber() * entry.getValue() / config.getSeriesLength();
            }
            marginalImpacts.add(new Value(weightedSum));
        }
    } else {
        // pick the most frequent value at each position;
        // an empty count map yields Value(null), matching the original behavior
        for (Map<Value, Long> counts : valueCounts) {
            long bestCount = 0;
            String bestValue = null;
            for (Map.Entry<Value, Long> entry : counts.entrySet()) {
                if (entry.getValue() > bestCount) {
                    bestCount = entry.getValue();
                    bestValue = entry.getKey().asString();
                }
            }
            marginalImpacts.add(new Value(bestValue));
        }
    }
    return marginalImpacts;
}
Aggregations