Example usage of org.kie.kogito.explainability.model.Saliency in the kogito-apps project (kiegroup): the buildShapResults method of the ShapResultsTest class.
/**
 * Builds a {@link ShapResults} test fixture.
 * <p>
 * Produces {@code nOutputs} saliencies named {@code "o0"..}, each holding
 * {@code nFeatures} numeric features named {@code "f0"..} whose importance
 * score is {@code i * j * scalar1}, plus a background (fnull) vector of
 * length {@code nOutputs} with every entry set to {@code scalar2}.
 *
 * @param nOutputs  number of output saliencies to generate
 * @param nFeatures number of feature importances per saliency
 * @param scalar1   multiplier applied to each importance score
 * @param scalar2   value added to every entry of the fnull baseline vector
 * @return a populated {@link ShapResults} instance
 */
ShapResults buildShapResults(int nOutputs, int nFeatures, int scalar1, int scalar2) {
    Saliency[] saliencies = new Saliency[nOutputs];
    for (int i = 0; i < nOutputs; i++) {
        // presize: we know exactly how many importances will be added
        List<FeatureImportance> fis = new ArrayList<>(nFeatures);
        for (int j = 0; j < nFeatures; j++) {
            // "f" + j — String.valueOf is redundant inside string concatenation
            fis.add(new FeatureImportance(new Feature("f" + j, Type.NUMBER, new Value(j)), i * j * scalar1));
        }
        saliencies[i] = new Saliency(new Output("o" + i, Type.NUMBER, new Value(i), 1.0), fis);
    }
    // new double[nOutputs] is zero-filled; mapAddToSelf shifts every entry to scalar2 in place
    RealVector fnull = MatrixUtils.createRealVector(new double[nOutputs]);
    fnull.mapAddToSelf(scalar2);
    return new ShapResults(saliencies, fnull);
}
Example usage of org.kie.kogito.explainability.model.Saliency in the kogito-apps project (kiegroup): the testFidelityWithTextClassifier method of the ExplainabilityMetricsTest class.
/**
 * Verifies that classification fidelity can be computed without throwing
 * for LIME saliencies produced over a dummy text classifier's prediction.
 */
@Test
void testFidelityWithTextClassifier() throws ExecutionException, InterruptedException, TimeoutException {
    PredictionProvider model = TestUtils.getDummyTextClassifier();
    LimeExplainer limeExplainer = new LimeExplainer(new LimeConfig().withSamples(10));
    // one full-text feature (tokenized on spaces) and one plain text feature
    List<Feature> inputFeatures = new LinkedList<>();
    inputFeatures.add(FeatureFactory.newFulltextFeature("f-0", "brown fox", text -> Arrays.asList(text.split(" "))));
    inputFeatures.add(FeatureFactory.newTextFeature("f-1", "money"));
    PredictionInput predictionInput = new PredictionInput(inputFeatures);
    PredictionOutput predictionOutput = model.predictAsync(List.of(predictionInput))
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit())
            .get(0);
    Prediction prediction = new SimplePrediction(predictionInput, predictionOutput);
    Map<String, Saliency> saliencies = limeExplainer.explainAsync(prediction, model)
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    // pair each per-output saliency with the prediction it explains
    List<Pair<Saliency, Prediction>> pairs = new LinkedList<>();
    saliencies.values().forEach(saliency -> pairs.add(Pair.of(saliency, prediction)));
    Assertions.assertDoesNotThrow(() -> ExplainabilityMetrics.classificationFidelity(pairs));
}
Example usage of org.kie.kogito.explainability.model.Saliency in the kogito-apps project (kiegroup): the testPMMLScorecardCategorical method of the PmmlScorecardCategoricalLimeExplainerTest class.
/**
 * End-to-end LIME check for the categorical PMML scorecard model: each
 * saliency must have a positive impact score for its top-2 features, the
 * local saliency must be stable, and the local-saliency F1 over a sample
 * distribution must fall in [0, 1].
 */
@Test
void testPMMLScorecardCategorical() throws Exception {
    PredictionInput testInput = getTestInput();
    PerturbationContext perturbationContext = new PerturbationContext(0L, new Random(), 1);
    LimeExplainer explainer = new LimeExplainer(new LimeConfig().withSamples(10).withPerturbationContext(perturbationContext));
    PredictionProvider model = getModel();
    List<PredictionOutput> outputs = model.predictAsync(List.of(testInput))
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    assertThat(outputs).isNotNull().isNotEmpty();
    PredictionOutput firstOutput = outputs.get(0);
    assertThat(firstOutput).isNotNull();
    Prediction prediction = new SimplePrediction(testInput, firstOutput);
    Map<String, Saliency> saliencies = explainer.explainAsync(prediction, model)
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    for (Saliency saliency : saliencies.values()) {
        assertThat(saliency).isNotNull();
        // removing the top-2 features must measurably change the prediction
        double impact = ExplainabilityMetrics.impactScore(model, prediction, saliency.getTopFeatures(2));
        assertThat(impact).isGreaterThan(0d);
    }
    assertDoesNotThrow(() -> ValidationUtils.validateLocalSaliencyStability(model, prediction, explainer, 1, 0.4, 0.4));
    DataDistribution distribution = new PredictionInputsDataDistribution(getSamples());
    // decision "score", k = 1, chunkSize = 2
    double f1 = ExplainabilityMetrics.getLocalSaliencyF1("score", model, explainer, distribution, 1, 2);
    AssertionsForClassTypes.assertThat(f1).isBetween(0d, 1d);
}
Example usage of org.kie.kogito.explainability.model.Saliency in the kogito-apps project (kiegroup): the testWithDataDistribution method of the LimeExplainerTest class.
/**
 * Checks that LIME can explain a prediction when configured with an
 * independent-features data distribution: the feature values in the input
 * are NaN, so the explainer must sample from the distribution instead.
 */
@Test
void testWithDataDistribution() throws InterruptedException, ExecutionException, TimeoutException {
    Random random = new Random();
    PerturbationContext perturbationContext = new PerturbationContext(4L, random, 1);
    int featureCount = 4;
    List<Feature> features = new ArrayList<>(featureCount);
    List<FeatureDistribution> featureDistributions = new ArrayList<>(featureCount);
    for (int i = 0; i < featureCount; i++) {
        // NaN value: actual values come from the per-feature distribution below
        Feature numericalFeature = FeatureFactory.newNumericalFeature("f-" + i, Double.NaN);
        features.add(numericalFeature);
        List<Value> sampledValues = new ArrayList<>(4);
        for (int draw = 0; draw < 4; draw++) {
            sampledValues.add(Type.NUMBER.randomValue(perturbationContext));
        }
        featureDistributions.add(new GenericFeatureDistribution(numericalFeature, sampledValues));
    }
    DataDistribution dataDistribution = new IndependentFeaturesDataDistribution(featureDistributions);
    LimeConfig limeConfig = new LimeConfig()
            .withDataDistribution(dataDistribution)
            .withPerturbationContext(perturbationContext)
            .withSamples(10);
    LimeExplainer limeExplainer = new LimeExplainer(limeConfig);
    PredictionInput input = new PredictionInput(features);
    PredictionProvider model = TestUtils.getSumThresholdModel(random.nextDouble(), random.nextDouble());
    PredictionOutput output = model.predictAsync(List.of(input))
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit())
            .get(0);
    Prediction prediction = new SimplePrediction(input, output);
    Map<String, Saliency> saliencies = limeExplainer.explainAsync(prediction, model)
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    assertThat(saliencies).isNotNull();
    // the threshold model exposes an "inside" decision; it must have a saliency
    Saliency saliency = saliencies.get("inside");
    assertThat(saliency).isNotNull();
}
Example usage of org.kie.kogito.explainability.model.Saliency in the kogito-apps project (kiegroup): the testSparseBalance method of the LimeExplainerTest class.
/**
 * Verifies the sparse-balance penalty: for every feature, the absolute
 * importance score produced with {@code penalizeBalanceSparse(true)} must be
 * no larger than the score produced without the penalty. Repeated for
 * feature counts 1..3 and parameterized over several RNG seeds.
 */
@ParameterizedTest
@ValueSource(longs = { 0, 1, 2, 3, 4 })
void testSparseBalance(long seed) throws InterruptedException, ExecutionException, TimeoutException {
    // loop-invariant test constants
    int sampleCount = 100;
    String decisionName = "sum-but0";
    for (int featureCount = 1; featureCount < 4; featureCount++) {
        Random random = new Random();
        LimeConfig configNoPenalty = new LimeConfig()
                .withPerturbationContext(new PerturbationContext(seed, random, DEFAULT_NO_OF_PERTURBATIONS))
                .withSamples(sampleCount)
                .withPenalizeBalanceSparse(false);
        LimeExplainer explainerNoPenalty = new LimeExplainer(configNoPenalty);
        List<Feature> features = new ArrayList<>();
        for (int i = 0; i < featureCount; i++) {
            features.add(TestUtils.getMockedNumericFeature(i));
        }
        PredictionInput input = new PredictionInput(features);
        PredictionProvider model = TestUtils.getSumSkipModel(0);
        PredictionOutput output = model.predictAsync(List.of(input))
                .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit())
                .get(0);
        Prediction prediction = new SimplePrediction(input, output);
        Map<String, Saliency> salienciesNoPenalty = explainerNoPenalty.explainAsync(prediction, model)
                .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
        assertThat(salienciesNoPenalty).isNotNull();
        Saliency saliencyNoPenalty = salienciesNoPenalty.get(decisionName);
        // same seed and shared Random, but with the sparse-balance penalty enabled
        LimeConfig configWithPenalty = new LimeConfig()
                .withPerturbationContext(new PerturbationContext(seed, random, DEFAULT_NO_OF_PERTURBATIONS))
                .withSamples(sampleCount)
                .withPenalizeBalanceSparse(true);
        LimeExplainer explainerWithPenalty = new LimeExplainer(configWithPenalty);
        Map<String, Saliency> salienciesWithPenalty = explainerWithPenalty.explainAsync(prediction, model)
                .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
        assertThat(salienciesWithPenalty).isNotNull();
        Saliency saliencyWithPenalty = salienciesWithPenalty.get(decisionName);
        for (int i = 0; i < features.size(); i++) {
            double penalizedScore = saliencyWithPenalty.getPerFeatureImportance().get(i).getScore();
            double plainScore = saliencyNoPenalty.getPerFeatureImportance().get(i).getScore();
            // the penalty may shrink a score toward zero, never grow it
            assertThat(Math.abs(penalizedScore)).isLessThanOrEqualTo(Math.abs(plainScore));
        }
    }
}
Aggregations