Use of org.kie.kogito.explainability.model.Saliency in project kogito-apps by kiegroup.
From the class DummyDmnModelsLimeExplainerTest, method testFunctional2DMNExplanation:
@Test
void testFunctional2DMNExplanation() throws ExecutionException, InterruptedException, TimeoutException {
    DMNRuntime dmnRuntime = DMNKogito.createGenericDMNRuntime(new InputStreamReader(getClass().getResourceAsStream("/dmn/functionalTest2.dmn")));
    assertThat(dmnRuntime.getModels().size()).isEqualTo(1);
    final String namespace = "https://kiegroup.org/dmn/_049CD980-1310-4B02-9E90-EFC57059F44A";
    final String name = "new-file";
    DecisionModel decisionModel = new DmnDecisionModel(dmnRuntime, namespace, name);
    PredictionProvider model = new DecisionModelWrapper(decisionModel);
    Map<String, Object> context = new HashMap<>();
    context.put("numberInput", 1);
    context.put("notUsedInput", 1);
    List<Feature> features = new ArrayList<>();
    features.add(FeatureFactory.newCompositeFeature("context", context));
    PredictionInput predictionInput = new PredictionInput(features);
    List<PredictionOutput> predictionOutputs = model.predictAsync(List.of(predictionInput)).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    Prediction prediction = new SimplePrediction(predictionInput, predictionOutputs.get(0));
    Random random = new Random();
    PerturbationContext perturbationContext = new PerturbationContext(0L, random, 1);
    LimeConfig limeConfig = new LimeConfig().withSamples(10).withPerturbationContext(perturbationContext);
    LimeExplainer limeExplainer = new LimeExplainer(limeConfig);
    Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    for (Saliency saliency : saliencyMap.values()) {
        assertThat(saliency).isNotNull();
        List<FeatureImportance> topFeatures = saliency.getPositiveFeatures(2);
        assertThat(topFeatures.isEmpty()).isFalse();
        assertThat(topFeatures.get(0).getFeature().getName()).isEqualTo("numberInput");
    }
    assertThatCode(() -> ValidationUtils.validateLocalSaliencyStability(model, prediction, limeExplainer, 1, 0.5, 0.5)).doesNotThrowAnyException();
    String decision = "decision";
    List<PredictionInput> inputs = new ArrayList<>();
    for (int n = 0; n < 10; n++) {
        inputs.add(new PredictionInput(DataUtils.perturbFeatures(features, perturbationContext)));
    }
    DataDistribution distribution = new PredictionInputsDataDistribution(inputs);
    int k = 2;
    int chunkSize = 5;
    double precision = ExplainabilityMetrics.getLocalSaliencyPrecision(decision, model, limeExplainer, distribution, k, chunkSize);
    assertThat(precision).isBetween(0d, 1d);
    double recall = ExplainabilityMetrics.getLocalSaliencyRecall(decision, model, limeExplainer, distribution, k, chunkSize);
    assertThat(recall).isBetween(0d, 1d);
    double f1 = ExplainabilityMetrics.getLocalSaliencyF1(decision, model, limeExplainer, distribution, k, chunkSize);
    assertThat(f1).isBetween(0d, 1d);
}
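The DecisionModelWrapper used above is a small test-side adapter, not a library class. A minimal sketch of what it could look like, assuming each DMN decision result is exposed one-to-one as a textual Output with a constant score; the real wrapper in kogito-apps converts features to context variables more carefully, and toMap() below is a hypothetical helper that only unwraps the composite "context" feature built in this test:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;

import org.kie.dmn.api.core.DMNContext;
import org.kie.dmn.api.core.DMNDecisionResult;
import org.kie.dmn.api.core.DMNResult;
import org.kie.kogito.decision.DecisionModel;
import org.kie.kogito.explainability.model.Feature;
import org.kie.kogito.explainability.model.Output;
import org.kie.kogito.explainability.model.PredictionInput;
import org.kie.kogito.explainability.model.PredictionOutput;
import org.kie.kogito.explainability.model.PredictionProvider;
import org.kie.kogito.explainability.model.Type;
import org.kie.kogito.explainability.model.Value;

public class DecisionModelWrapper implements PredictionProvider {

    private final DecisionModel decisionModel;

    public DecisionModelWrapper(DecisionModel decisionModel) {
        this.decisionModel = decisionModel;
    }

    @Override
    public CompletableFuture<List<PredictionOutput>> predictAsync(List<PredictionInput> inputs) {
        return CompletableFuture.supplyAsync(() -> {
            List<PredictionOutput> predictionOutputs = new ArrayList<>();
            for (PredictionInput input : inputs) {
                // evaluate the DMN model on the reconstructed context
                DMNContext context = decisionModel.newContext(toMap(input.getFeatures()));
                DMNResult dmnResult = decisionModel.evaluateAll(context);
                List<Output> outputs = new ArrayList<>();
                for (DMNDecisionResult decisionResult : dmnResult.getDecisionResults()) {
                    // one Output per decision, with a constant (uninformative) score
                    outputs.add(new Output(decisionResult.getDecisionName(), Type.TEXT,
                            new Value(decisionResult.getResult()), 1d));
                }
                predictionOutputs.add(new PredictionOutput(outputs));
            }
            return predictionOutputs;
        });
    }

    // hypothetical helper: unwrap composite features back into plain map entries
    @SuppressWarnings("unchecked")
    private Map<String, Object> toMap(List<Feature> features) {
        Map<String, Object> map = new HashMap<>();
        for (Feature composite : features) {
            for (Feature nested : (List<Feature>) composite.getValue().getUnderlyingObject()) {
                map.put(nested.getName(), nested.getValue().getUnderlyingObject());
            }
        }
        return map;
    }
}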
Use of org.kie.kogito.explainability.model.Saliency in project kogito-apps by kiegroup.
From the class FraudScoringDmnLimeExplainerTest, method testFraudScoringDMNExplanation:
@Test
void testFraudScoringDMNExplanation() throws ExecutionException, InterruptedException, TimeoutException {
    PredictionProvider model = getModel();
    PredictionInput predictionInput = getTestInput();
    List<PredictionOutput> predictionOutputs = model.predictAsync(List.of(predictionInput)).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    Prediction prediction = new SimplePrediction(predictionInput, predictionOutputs.get(0));
    Random random = new Random();
    PerturbationContext perturbationContext = new PerturbationContext(0L, random, 1);
    LimeConfig limeConfig = new LimeConfig().withSamples(10).withPerturbationContext(perturbationContext);
    LimeExplainer limeExplainer = new LimeExplainer(limeConfig);
    Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    for (Saliency saliency : saliencyMap.values()) {
        assertNotNull(saliency);
        List<FeatureImportance> topFeatures = saliency.getTopFeatures(4);
        double topScore = Math.abs(topFeatures.stream().map(FeatureImportance::getScore).findFirst().orElse(0d));
        if (!topFeatures.isEmpty() && topScore > 0) {
            double v = ExplainabilityMetrics.impactScore(model, prediction, topFeatures);
            // checks that dropping the important features flips the prediction (or significantly drops the output score)
            assertThat(v).isPositive();
        }
    }
    // thresholds set to 0.4 since the "Last Transaction" output is inherently unstable
    assertDoesNotThrow(() -> ValidationUtils.validateLocalSaliencyStability(model, prediction, limeExplainer, 1, 0.4, 0.4));
    String decision = "Risk Score";
    List<PredictionInput> inputs = new ArrayList<>();
    for (int n = 0; n < 10; n++) {
        inputs.add(new PredictionInput(DataUtils.perturbFeatures(predictionInput.getFeatures(), perturbationContext)));
    }
    DataDistribution distribution = new PredictionInputsDataDistribution(inputs);
    int k = 2;
    int chunkSize = 2;
    double f1 = ExplainabilityMetrics.getLocalSaliencyF1(decision, model, limeExplainer, distribution, k, chunkSize);
    AssertionsForClassTypes.assertThat(f1).isBetween(0.5d, 1d);
}
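The guard on topFeatures above (non-empty, non-zero top score) keeps the impact-score assertion meaningful when LIME attributes no importance at all. A hypothetical helper capturing that pattern, using the same classes and static imports as the test (the helper's name and signature are this sketch's own):

// hypothetical helper capturing the guarded impact-score pattern above
static void assertImpactfulTopFeatures(PredictionProvider model, Prediction prediction,
        Saliency saliency, int k) throws Exception {
    List<FeatureImportance> topFeatures = saliency.getTopFeatures(k);
    double topScore = Math.abs(topFeatures.stream()
            .map(FeatureImportance::getScore).findFirst().orElse(0d));
    // only assert when LIME actually attributed non-zero importance
    if (!topFeatures.isEmpty() && topScore > 0) {
        // dropping genuinely important features should flip the prediction,
        // or at least significantly drop its score, yielding a positive impact
        double impact = ExplainabilityMetrics.impactScore(model, prediction, topFeatures);
        assertThat(impact).isPositive();
    }
}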
Use of org.kie.kogito.explainability.model.Saliency in project kogito-apps by kiegroup.
From the class LoanEligibilityDmnLimeExplainerTest, method testLoanEligibilityDMNExplanation:
@Test
void testLoanEligibilityDMNExplanation() throws ExecutionException, InterruptedException, TimeoutException {
    PredictionProvider model = getModel();
    PredictionInput predictionInput = getTestInput();
    List<PredictionOutput> predictionOutputs = model.predictAsync(List.of(predictionInput)).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    Prediction prediction = new SimplePrediction(predictionInput, predictionOutputs.get(0));
    Random random = new Random();
    PerturbationContext perturbationContext = new PerturbationContext(0L, random, 1);
    LimeConfig limeConfig = new LimeConfig().withPerturbationContext(perturbationContext);
    LimeExplainer limeExplainer = new LimeExplainer(limeConfig);
    Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    for (Saliency saliency : saliencyMap.values()) {
        assertNotNull(saliency);
    }
    assertDoesNotThrow(() -> ValidationUtils.validateLocalSaliencyStability(model, prediction, limeExplainer, 1, 0.4, 0.4));
    String decision = "Eligibility";
    List<PredictionInput> inputs = new ArrayList<>();
    for (int n = 0; n < 10; n++) {
        inputs.add(new PredictionInput(DataUtils.perturbFeatures(predictionInput.getFeatures(), perturbationContext)));
    }
    DataDistribution distribution = new PredictionInputsDataDistribution(inputs);
    int k = 2;
    int chunkSize = 2;
    double f1 = ExplainabilityMetrics.getLocalSaliencyF1(decision, model, limeExplainer, distribution, k, chunkSize);
    AssertionsForClassTypes.assertThat(f1).isBetween(0.5d, 1d);
}
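getLocalSaliencyF1 is asserted against the same [0.5, 1] band as in the fraud test. Assuming the standard definition, F1 is the harmonic mean of the local saliency precision and recall metrics that the first example computed separately; the exact chunk-wise aggregation lives inside ExplainabilityMetrics:

// F1 as the harmonic mean of precision and recall; defined as 0 when both are 0
static double f1(double precision, double recall) {
    return precision + recall == 0d ? 0d : 2d * precision * recall / (precision + recall);
}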
Use of org.kie.kogito.explainability.model.Saliency in project kogito-apps by kiegroup.
From the class OpenNLPLimeExplainerTest, method testOpenNLPLangDetect:
@ParameterizedTest
@ValueSource(longs = { 0 })
void testOpenNLPLangDetect(long seed) throws Exception {
    Random random = new Random();
    LimeConfig limeConfig = new LimeConfig().withSamples(10).withPerturbationContext(new PerturbationContext(seed, random, 1));
    LimeExplainer limeExplainer = new LimeExplainer(limeConfig);
    PredictionProvider model = getModel();
    Function<String, List<String>> tokenizer = getTokenizer();
    PredictionInput testInput = getTestInput(tokenizer);
    List<PredictionOutput> predictionOutputs = model.predictAsync(List.of(testInput)).get();
    assertNotNull(predictionOutputs);
    assertFalse(predictionOutputs.isEmpty());
    PredictionOutput output = predictionOutputs.get(0);
    assertNotNull(output);
    assertNotNull(output.getOutputs());
    assertEquals(1, output.getOutputs().size());
    assertEquals("ita", output.getOutputs().get(0).getValue().asString());
    assertEquals(0.03, output.getOutputs().get(0).getScore(), 1e-2);
    Prediction prediction = new SimplePrediction(testInput, output);
    Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    for (Saliency saliency : saliencyMap.values()) {
        assertNotNull(saliency);
        double i1 = ExplainabilityMetrics.impactScore(model, prediction, saliency.getPositiveFeatures(3));
        assertEquals(1d, i1);
    }
    assertDoesNotThrow(() -> ValidationUtils.validateLocalSaliencyStability(model, prediction, limeExplainer, 2, 0.6, 0.6));
    List<PredictionInput> inputs = getSamples(tokenizer);
    String decision = "lang";
    DataDistribution distribution = new PredictionInputsDataDistribution(inputs);
    int k = 2;
    int chunkSize = 2;
    double f1 = ExplainabilityMetrics.getLocalSaliencyF1(decision, model, limeExplainer, distribution, k, chunkSize);
    assertThat(f1).isBetween(0.5d, 1d);
}
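getTokenizer() and getTestInput(tokenizer) are helpers of the test class that are not shown here. One plausible sketch, assuming OpenNLP's SimpleTokenizer and a full-text feature; the feature name and the Italian sample sentence are illustrative assumptions, not the actual test data:

import java.util.Arrays;
import java.util.List;
import java.util.function.Function;

import opennlp.tools.tokenize.SimpleTokenizer;
import org.kie.kogito.explainability.model.Feature;
import org.kie.kogito.explainability.model.FeatureFactory;
import org.kie.kogito.explainability.model.PredictionInput;

// wrap OpenNLP's rule-based tokenizer as a Function
private Function<String, List<String>> getTokenizer() {
    return text -> Arrays.asList(SimpleTokenizer.INSTANCE.tokenize(text));
}

// build a single full-text feature whose tokens LIME can perturb independently
private PredictionInput getTestInput(Function<String, List<String>> tokenizer) {
    Feature text = FeatureFactory.newFulltextFeature("text",
            "questo è un piccolo testo scritto in italiano", tokenizer);
    return new PredictionInput(List.of(text));
}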
Use of org.kie.kogito.explainability.model.Saliency in project kogito-apps by kiegroup.
From the class PmmlCompoundScorecardLimeExplainerTest, method testPMMLCompoundScorecard:
@Test
void testPMMLCompoundScorecard() throws Exception {
    Random random = new Random();
    LimeConfig limeConfig = new LimeConfig().withSamples(10).withPerturbationContext(new PerturbationContext(0L, random, 1));
    LimeExplainer limeExplainer = new LimeExplainer(limeConfig);
    PredictionInput input = getTestInput();
    PredictionProvider model = getModel();
    List<PredictionOutput> predictionOutputs = model.predictAsync(List.of(input)).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    assertThat(predictionOutputs).isNotNull();
    assertThat(predictionOutputs).isNotEmpty();
    PredictionOutput output = predictionOutputs.get(0);
    assertThat(output).isNotNull();
    Prediction prediction = new SimplePrediction(input, output);
    Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    for (Saliency saliency : saliencyMap.values()) {
        assertThat(saliency).isNotNull();
        double v = ExplainabilityMetrics.impactScore(model, prediction, saliency.getTopFeatures(2));
        assertThat(v).isEqualTo(1d);
    }
    assertDoesNotThrow(() -> ValidationUtils.validateLocalSaliencyStability(model, prediction, limeExplainer, 1, 0.5, 0.5));
    List<PredictionInput> inputs = getSamples();
    DataDistribution distribution = new PredictionInputsDataDistribution(inputs);
    String decision = "score";
    int k = 1;
    int chunkSize = 2;
    double f1 = ExplainabilityMetrics.getLocalSaliencyF1(decision, model, limeExplainer, distribution, k, chunkSize);
    AssertionsForClassTypes.assertThat(f1).isBetween(0d, 1d);
}
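All five tests share the same skeleton: predict once, pair input and output as a Prediction, explain it with LIME, then score the explainer on a synthetic distribution of perturbed inputs. A hypothetical helper condensing that shared flow (the method name and the broad throws clause are this sketch's own choices):

import java.util.List;
import java.util.Map;

import org.kie.kogito.explainability.Config;
import org.kie.kogito.explainability.local.lime.LimeExplainer;
import org.kie.kogito.explainability.model.DataDistribution;
import org.kie.kogito.explainability.model.Prediction;
import org.kie.kogito.explainability.model.PredictionInput;
import org.kie.kogito.explainability.model.PredictionInputsDataDistribution;
import org.kie.kogito.explainability.model.PredictionOutput;
import org.kie.kogito.explainability.model.PredictionProvider;
import org.kie.kogito.explainability.model.Saliency;
import org.kie.kogito.explainability.model.SimplePrediction;
import org.kie.kogito.explainability.utils.ExplainabilityMetrics;

static double explainAndMeasureF1(PredictionProvider model, PredictionInput input,
        LimeExplainer explainer, String decision, List<PredictionInput> samples,
        int k, int chunkSize) throws Exception {
    // predict once and pair input and output as the prediction to explain
    PredictionOutput output = model.predictAsync(List.of(input))
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit())
            .get(0);
    Prediction prediction = new SimplePrediction(input, output);
    // one saliency per decision; fail fast if any is missing
    Map<String, Saliency> saliencyMap = explainer.explainAsync(prediction, model)
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    if (saliencyMap.values().stream().anyMatch(java.util.Objects::isNull)) {
        throw new IllegalStateException("missing saliency for some decision");
    }
    // score the explainer against a synthetic distribution around the input
    DataDistribution distribution = new PredictionInputsDataDistribution(samples);
    return ExplainabilityMetrics.getLocalSaliencyF1(decision, model, explainer, distribution, k, chunkSize);
}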