Use of org.kie.kogito.explainability.model.Prediction in project kogito-apps by kiegroup.
From class FraudScoringDmnLimeExplainerTest, method testExplanationWeightedStabilityWithOptimization:
@Test
void testExplanationWeightedStabilityWithOptimization() throws ExecutionException, InterruptedException, TimeoutException {
    PredictionProvider model = getModel();
    List<PredictionInput> samples = DmnTestUtils.randomFraudScoringInputs();
    List<PredictionOutput> predictionOutputs = model.predictAsync(samples.subList(0, 5)).get();
    List<Prediction> predictions = DataUtils.getPredictions(samples, predictionOutputs);
    long seed = 0;
    LimeConfigOptimizer limeConfigOptimizer = new LimeConfigOptimizer()
            .withDeterministicExecution(true)
            .withWeightedStability(0.4, 0.6)
            .withStepCountLimit(10);
    Random random = new Random();
    PerturbationContext perturbationContext = new PerturbationContext(seed, random, 1);
    LimeConfig initialConfig = new LimeConfig().withSamples(10).withPerturbationContext(perturbationContext);
    // optimize the LIME hyperparameters against the sampled predictions
    LimeConfig optimizedConfig = limeConfigOptimizer.optimize(initialConfig, predictions, model);
    assertThat(optimizedConfig).isNotSameAs(initialConfig);
    LimeExplainer limeExplainer = new LimeExplainer(optimizedConfig);
    PredictionInput testPredictionInput = getTestInput();
    List<PredictionOutput> testPredictionOutputs = model.predictAsync(List.of(testPredictionInput))
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    Prediction instance = new SimplePrediction(testPredictionInput, testPredictionOutputs.get(0));
    // explanations produced with the optimized config should be stable on an unseen input
    assertDoesNotThrow(() -> ValidationUtils.validateLocalSaliencyStability(model, instance, limeExplainer, 1, 0.6, 0.4));
}
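
The snippet relies on two helpers defined elsewhere in the test class, getModel() and getTestInput(). A minimal sketch of getModel(), reconstructed from the FraudScoringDmnPDPExplainerTest entry below, which builds the same fraud-scoring model inline; treat it as a sketch rather than the actual helper in kogito-apps:

private PredictionProvider getModel() {
    // load the fraud-scoring DMN from test resources, exactly as the PDP test below does
    DMNRuntime dmnRuntime = DMNKogito.createGenericDMNRuntime(
            new InputStreamReader(getClass().getResourceAsStream("/dmn/fraud.dmn")));
    DecisionModel decisionModel = new DmnDecisionModel(dmnRuntime,
            "http://www.redhat.com/dmn/definitions/_81556584-7d78-4f8c-9d5f-b3cddb9b5c73",
            "fraud-scoring");
    // wrap the decision model so it exposes the PredictionProvider API
    return new DecisionModelWrapper(decisionModel);
}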
Use of org.kie.kogito.explainability.model.Prediction in project kogito-apps by kiegroup.
From class FraudScoringDmnLimeExplainerTest, method testFraudScoringDMNExplanation:
@Test
void testFraudScoringDMNExplanation() throws ExecutionException, InterruptedException, TimeoutException {
    PredictionProvider model = getModel();
    PredictionInput predictionInput = getTestInput();
    List<PredictionOutput> predictionOutputs = model.predictAsync(List.of(predictionInput))
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    Prediction prediction = new SimplePrediction(predictionInput, predictionOutputs.get(0));
    Random random = new Random();
    PerturbationContext perturbationContext = new PerturbationContext(0L, random, 1);
    LimeConfig limeConfig = new LimeConfig().withSamples(10).withPerturbationContext(perturbationContext);
    LimeExplainer limeExplainer = new LimeExplainer(limeConfig);
    Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model)
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    for (Saliency saliency : saliencyMap.values()) {
        assertNotNull(saliency);
        List<FeatureImportance> topFeatures = saliency.getTopFeatures(4);
        double topScore = Math.abs(topFeatures.stream().map(FeatureImportance::getScore).findFirst().orElse(0d));
        if (!topFeatures.isEmpty() && topScore > 0) {
            // check that dropping the most important features flips the prediction
            // (or causes a significant drop in the output score)
            double v = ExplainabilityMetrics.impactScore(model, prediction, topFeatures);
            assertThat(v).isPositive();
        }
    }
    // stability thresholds relaxed to 0.4 since the "Last Transaction" output is inherently unstable
    assertDoesNotThrow(() -> ValidationUtils.validateLocalSaliencyStability(model, prediction, limeExplainer, 1, 0.4, 0.4));
    String decision = "Risk Score";
    List<PredictionInput> inputs = new ArrayList<>();
    for (int n = 0; n < 10; n++) {
        inputs.add(new PredictionInput(DataUtils.perturbFeatures(predictionInput.getFeatures(), perturbationContext)));
    }
    DataDistribution distribution = new PredictionInputsDataDistribution(inputs);
    int k = 2;
    int chunkSize = 2;
    double f1 = ExplainabilityMetrics.getLocalSaliencyF1(decision, model, limeExplainer, distribution, k, chunkSize);
    assertThat(f1).isBetween(0.5d, 1d);
}
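
A hedged usage sketch, not part of the test: once explainAsync returns, the per-decision saliencies can be inspected directly. It assumes the Saliency.getPerFeatureImportance() accessor from explainability-core:

// print each decision's feature attributions; purely illustrative
for (Map.Entry<String, Saliency> entry : saliencyMap.entrySet()) {
    System.out.println("Decision: " + entry.getKey());
    for (FeatureImportance featureImportance : entry.getValue().getPerFeatureImportance()) {
        System.out.println("  " + featureImportance.getFeature().getName() + " -> " + featureImportance.getScore());
    }
}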
Use of org.kie.kogito.explainability.model.Prediction in project kogito-apps by kiegroup.
From class FraudScoringDmnPDPExplainerTest, method testFraudScoringDMNExplanation:
@Test
void testFraudScoringDMNExplanation() throws ExecutionException, InterruptedException, TimeoutException {
    DMNRuntime dmnRuntime = DMNKogito.createGenericDMNRuntime(
            new InputStreamReader(getClass().getResourceAsStream("/dmn/fraud.dmn")));
    assertEquals(1, dmnRuntime.getModels().size());
    final String FRAUD_NS = "http://www.redhat.com/dmn/definitions/_81556584-7d78-4f8c-9d5f-b3cddb9b5c73";
    final String FRAUD_NAME = "fraud-scoring";
    DecisionModel decisionModel = new DmnDecisionModel(dmnRuntime, FRAUD_NS, FRAUD_NAME);
    PredictionProvider model = new DecisionModelWrapper(decisionModel);
    List<PredictionInput> inputs = DmnTestUtils.randomFraudScoringInputs();
    List<PredictionOutput> predictionOutputs = model.predictAsync(inputs)
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    // pair every input with its corresponding output
    List<Prediction> predictions = new ArrayList<>();
    for (int i = 0; i < predictionOutputs.size(); i++) {
        predictions.add(new SimplePrediction(inputs.get(i), predictionOutputs.get(i)));
    }
    PartialDependencePlotExplainer partialDependencePlotExplainer = new PartialDependencePlotExplainer();
    List<PartialDependenceGraph> pdps = partialDependencePlotExplainer.explainFromPredictions(model, predictions);
    assertThat(pdps).isNotNull().hasSize(32);
}
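
A hedged sketch of how the resulting graphs could be consumed, assuming the PartialDependenceGraph accessors getFeature(), getX() and getY() from explainability-core:

// dump each partial dependence curve as (x, y) value pairs; purely illustrative
for (PartialDependenceGraph pdp : pdps) {
    System.out.println("Feature: " + pdp.getFeature().getName());
    for (int i = 0; i < pdp.getX().size(); i++) {
        System.out.println("  " + pdp.getX().get(i) + " -> " + pdp.getY().get(i));
    }
}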
Use of org.kie.kogito.explainability.model.Prediction in project kogito-apps by kiegroup.
From class LoanEligibilityDmnCounterfactualExplainerTest, method testLoanEligibilityDMNExplanation:
@Test
void testLoanEligibilityDMNExplanation() throws ExecutionException, InterruptedException, TimeoutException {
    PredictionProvider model = getModel();
    // the goal: output names and values the counterfactual search should reach
    // (names match the decisions in the DMN model, including the "Is Enought?" spelling)
    final List<Output> goal = List.of(
            new Output("Is Enought?", Type.NUMBER, new Value(100), 0.0d),
            new Output("Eligibility", Type.TEXT, new Value("No"), 0.0d),
            new Output("Decide", Type.BOOLEAN, new Value(true), 0.0d));
    final TerminationConfig terminationConfig = new TerminationConfig().withScoreCalculationCountLimit(steps);
    final SolverConfig solverConfig = SolverConfigBuilder.builder().withTerminationConfig(terminationConfig).build();
    solverConfig.setRandomSeed(randomSeed);
    solverConfig.setEnvironmentMode(EnvironmentMode.REPRODUCIBLE);
    CounterfactualConfig config = new CounterfactualConfig();
    config.withSolverConfig(solverConfig);
    final CounterfactualExplainer explainer = new CounterfactualExplainer(config);
    PredictionInput input = getTestInput();
    PredictionOutput output = new PredictionOutput(goal);
    // sanity-check that the model executes on the original input
    List<PredictionOutput> predictionOutputs = model.predictAsync(List.of(input))
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    Prediction prediction = new CounterfactualPrediction(input, output, null, UUID.randomUUID(), null);
    CounterfactualResult counterfactualResult = explainer.explainAsync(prediction, model).get();
    List<Feature> cfFeatures = counterfactualResult.getEntities().stream()
            .map(CounterfactualEntity::asFeature)
            .collect(Collectors.toList());
    List<Feature> unflattened = CompositeFeatureUtils.unflattenFeatures(cfFeatures, input.getFeatures());
    List<PredictionOutput> outputs = model.predictAsync(List.of(new PredictionInput(unflattened))).get();
    assertTrue(counterfactualResult.isValid());
    // re-running the model on the counterfactual input must satisfy the "Decide" goal
    final Output decideOutput = outputs.get(0).getOutputs().get(2);
    assertEquals("Decide", decideOutput.getName());
    assertTrue((Boolean) decideOutput.getValue().getUnderlyingObject());
}
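
The snippet references two fields declared on the test class that the extract omits. A hedged sketch follows; the values are assumptions, not the actual constants in kogito-apps:

// assumed class-level constants; the real values in kogito-apps may differ
private static final Long steps = 30_000L;   // OptaPlanner score-calculation budget
private static final Long randomSeed = 23L;  // fixed seed so the solver run is reproducible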
Use of org.kie.kogito.explainability.model.Prediction in project kogito-apps by kiegroup.
From class LoanEligibilityDmnLimeExplainerTest, method testLoanEligibilityDMNExplanation:
@Test
void testLoanEligibilityDMNExplanation() throws ExecutionException, InterruptedException, TimeoutException {
    PredictionProvider model = getModel();
    PredictionInput predictionInput = getTestInput();
    List<PredictionOutput> predictionOutputs = model.predictAsync(List.of(predictionInput))
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    Prediction prediction = new SimplePrediction(predictionInput, predictionOutputs.get(0));
    Random random = new Random();
    PerturbationContext perturbationContext = new PerturbationContext(0L, random, 1);
    LimeConfig limeConfig = new LimeConfig().withPerturbationContext(perturbationContext);
    LimeExplainer limeExplainer = new LimeExplainer(limeConfig);
    Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model)
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    for (Saliency saliency : saliencyMap.values()) {
        assertNotNull(saliency);
    }
    assertDoesNotThrow(() -> ValidationUtils.validateLocalSaliencyStability(model, prediction, limeExplainer, 1, 0.4, 0.4));
    // measure local saliency F1 for the "Eligibility" decision over a small
    // distribution of perturbed inputs
    String decision = "Eligibility";
    List<PredictionInput> inputs = new ArrayList<>();
    for (int n = 0; n < 10; n++) {
        inputs.add(new PredictionInput(DataUtils.perturbFeatures(predictionInput.getFeatures(), perturbationContext)));
    }
    DataDistribution distribution = new PredictionInputsDataDistribution(inputs);
    int k = 2;
    int chunkSize = 2;
    double f1 = ExplainabilityMetrics.getLocalSaliencyF1(decision, model, limeExplainer, distribution, k, chunkSize);
    assertThat(f1).isBetween(0.5d, 1d);
}
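
As in the fraud-scoring tests, getModel() and getTestInput() are class helpers not shown in the extract. A hypothetical sketch of how such a test input could be assembled with FeatureFactory; the feature names and values are illustrative, not the actual loan-eligibility fixture:

private PredictionInput getTestInput() {
    // illustrative features only; the real fixture in kogito-apps differs
    List<Feature> features = new ArrayList<>();
    features.add(FeatureFactory.newNumericalFeature("Amount", 5000));
    features.add(FeatureFactory.newTextFeature("Employment", "Employed"));
    return new PredictionInput(features);
}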