Example 6 with PredictionInputsDataDistribution

Use of org.kie.kogito.explainability.model.PredictionInputsDataDistribution in project kogito-apps by kiegroup.

From the class FraudScoringDmnLimeExplainerTest, method testFraudScoringDMNExplanation:

@Test
void testFraudScoringDMNExplanation() throws ExecutionException, InterruptedException, TimeoutException {
    PredictionProvider model = getModel();
    PredictionInput predictionInput = getTestInput();
    List<PredictionOutput> predictionOutputs = model.predictAsync(List.of(predictionInput)).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    Prediction prediction = new SimplePrediction(predictionInput, predictionOutputs.get(0));
    Random random = new Random();
    // Seeded perturbation context (seed 0, one feature perturbed per sample) keeps the LIME run reproducible.
    PerturbationContext perturbationContext = new PerturbationContext(0L, random, 1);
    LimeConfig limeConfig = new LimeConfig().withSamples(10).withPerturbationContext(perturbationContext);
    LimeExplainer limeExplainer = new LimeExplainer(limeConfig);
    Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    for (Saliency saliency : saliencyMap.values()) {
        assertNotNull(saliency);
        List<FeatureImportance> topFeatures = saliency.getTopFeatures(4);
        double topScore = Math.abs(topFeatures.stream().map(FeatureImportance::getScore).findFirst().orElse(0d));
        if (!topFeatures.isEmpty() && topScore > 0) {
            double v = ExplainabilityMetrics.impactScore(model, prediction, topFeatures);
            // checks the drop of important features triggers a flipped prediction (or a significant drop in the output score).
            assertThat(v).isPositive();
        }
    }
    // Stability thresholds set to 0.4 since the "Last Transaction" output is inherently unstable.
    assertDoesNotThrow(() -> ValidationUtils.validateLocalSaliencyStability(model, prediction, limeExplainer, 1, 0.4, 0.4));
    String decision = "Risk Score";
    // Build a small data distribution out of perturbed copies of the original input.
    List<PredictionInput> inputs = new ArrayList<>();
    for (int n = 0; n < 10; n++) {
        inputs.add(new PredictionInput(DataUtils.perturbFeatures(predictionInput.getFeatures(), perturbationContext)));
    }
    DataDistribution distribution = new PredictionInputsDataDistribution(inputs);
    int k = 2;
    int chunkSize = 2;
    double f1 = ExplainabilityMetrics.getLocalSaliencyF1(decision, model, limeExplainer, distribution, k, chunkSize);
    AssertionsForClassTypes.assertThat(f1).isBetween(0.5d, 1d);
}
Also used: SimplePrediction(org.kie.kogito.explainability.model.SimplePrediction) PerturbationContext(org.kie.kogito.explainability.model.PerturbationContext) PredictionInput(org.kie.kogito.explainability.model.PredictionInput) LimeExplainer(org.kie.kogito.explainability.local.lime.LimeExplainer) Prediction(org.kie.kogito.explainability.model.Prediction) ArrayList(java.util.ArrayList) Saliency(org.kie.kogito.explainability.model.Saliency) PredictionProvider(org.kie.kogito.explainability.model.PredictionProvider) LimeConfig(org.kie.kogito.explainability.local.lime.LimeConfig) Random(java.util.Random) FeatureImportance(org.kie.kogito.explainability.model.FeatureImportance) PredictionInputsDataDistribution(org.kie.kogito.explainability.model.PredictionInputsDataDistribution) DataDistribution(org.kie.kogito.explainability.model.DataDistribution) PredictionOutput(org.kie.kogito.explainability.model.PredictionOutput) Test(org.junit.jupiter.api.Test)
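
A note on the two metrics exercised above: impactScore drops the top-ranked features and checks whether the prediction flips or its output score drops significantly, which is why the test only asserts that the value is positive. getLocalSaliencyF1 presumably combines local saliency precision and recall over inputs drawn from the distribution (Example 10 below computes all three side by side), so the 0.5 lower bound acts as a loose sanity check rather than a tight quality bar.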

Example 7 with PredictionInputsDataDistribution

Use of org.kie.kogito.explainability.model.PredictionInputsDataDistribution in project kogito-apps by kiegroup.

From the class LoanEligibilityDmnLimeExplainerTest, method testLoanEligibilityDMNExplanation:

@Test
void testLoanEligibilityDMNExplanation() throws ExecutionException, InterruptedException, TimeoutException {
    PredictionProvider model = getModel();
    PredictionInput predictionInput = getTestInput();
    List<PredictionOutput> predictionOutputs = model.predictAsync(List.of(predictionInput)).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    Prediction prediction = new SimplePrediction(predictionInput, predictionOutputs.get(0));
    Random random = new Random();
    PerturbationContext perturbationContext = new PerturbationContext(0L, random, 1);
    LimeConfig limeConfig = new LimeConfig().withPerturbationContext(perturbationContext);
    LimeExplainer limeExplainer = new LimeExplainer(limeConfig);
    Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    for (Saliency saliency : saliencyMap.values()) {
        assertNotNull(saliency);
    }
    assertDoesNotThrow(() -> ValidationUtils.validateLocalSaliencyStability(model, prediction, limeExplainer, 1, 0.4, 0.4));
    String decision = "Eligibility";
    // Derive the data distribution from ten perturbed variants of the test input.
    List<PredictionInput> inputs = new ArrayList<>();
    for (int n = 0; n < 10; n++) {
        inputs.add(new PredictionInput(DataUtils.perturbFeatures(predictionInput.getFeatures(), perturbationContext)));
    }
    DataDistribution distribution = new PredictionInputsDataDistribution(inputs);
    int k = 2;
    int chunkSize = 2;
    double f1 = ExplainabilityMetrics.getLocalSaliencyF1(decision, model, limeExplainer, distribution, k, chunkSize);
    AssertionsForClassTypes.assertThat(f1).isBetween(0.5d, 1d);
}
Also used: SimplePrediction(org.kie.kogito.explainability.model.SimplePrediction) PerturbationContext(org.kie.kogito.explainability.model.PerturbationContext) PredictionInput(org.kie.kogito.explainability.model.PredictionInput) LimeExplainer(org.kie.kogito.explainability.local.lime.LimeExplainer) Prediction(org.kie.kogito.explainability.model.Prediction) ArrayList(java.util.ArrayList) Saliency(org.kie.kogito.explainability.model.Saliency) PredictionProvider(org.kie.kogito.explainability.model.PredictionProvider) LimeConfig(org.kie.kogito.explainability.local.lime.LimeConfig) Random(java.util.Random) PredictionInputsDataDistribution(org.kie.kogito.explainability.model.PredictionInputsDataDistribution) DataDistribution(org.kie.kogito.explainability.model.DataDistribution) PredictionOutput(org.kie.kogito.explainability.model.PredictionOutput) Test(org.junit.jupiter.api.Test)
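
Compared with the fraud scoring test, this one keeps the default LIME sample count (only the perturbation context is overridden) and skips the per-saliency impact check, asserting only that each saliency is non-null before running the same stability and F1 checks.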

Example 8 with PredictionInputsDataDistribution

Use of org.kie.kogito.explainability.model.PredictionInputsDataDistribution in project kogito-apps by kiegroup.

From the class OpenNLPLimeExplainerTest, method testOpenNLPLangDetect:

@ParameterizedTest
@ValueSource(longs = { 0 })
void testOpenNLPLangDetect(long seed) throws Exception {
    Random random = new Random();
    // The seed comes from @ValueSource, so each parameterized run is reproducible.
    LimeConfig limeConfig = new LimeConfig().withSamples(10).withPerturbationContext(new PerturbationContext(seed, random, 1));
    LimeExplainer limeExplainer = new LimeExplainer(limeConfig);
    PredictionProvider model = getModel();
    Function<String, List<String>> tokenizer = getTokenizer();
    PredictionInput testInput = getTestInput(tokenizer);
    List<PredictionOutput> predictionOutputs = model.predictAsync(List.of(testInput)).get();
    assertNotNull(predictionOutputs);
    assertFalse(predictionOutputs.isEmpty());
    PredictionOutput output = predictionOutputs.get(0);
    assertNotNull(output);
    assertNotNull(output.getOutputs());
    assertEquals(1, output.getOutputs().size());
    assertEquals("ita", output.getOutputs().get(0).getValue().asString());
    assertEquals(0.03, output.getOutputs().get(0).getScore(), 1e-2);
    Prediction prediction = new SimplePrediction(testInput, output);
    Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    for (Saliency saliency : saliencyMap.values()) {
        assertNotNull(saliency);
        double i1 = ExplainabilityMetrics.impactScore(model, prediction, saliency.getPositiveFeatures(3));
        assertEquals(1d, i1);
    }
    assertDoesNotThrow(() -> ValidationUtils.validateLocalSaliencyStability(model, prediction, limeExplainer, 2, 0.6, 0.6));
    List<PredictionInput> inputs = getSamples(tokenizer);
    String decision = "lang";
    DataDistribution distribution = new PredictionInputsDataDistribution(inputs);
    int k = 2;
    int chunkSize = 2;
    double f1 = ExplainabilityMetrics.getLocalSaliencyF1(decision, model, limeExplainer, distribution, k, chunkSize);
    assertThat(f1).isBetween(0.5d, 1d);
}
Also used: SimplePrediction(org.kie.kogito.explainability.model.SimplePrediction) PerturbationContext(org.kie.kogito.explainability.model.PerturbationContext) PredictionInput(org.kie.kogito.explainability.model.PredictionInput) LimeExplainer(org.kie.kogito.explainability.local.lime.LimeExplainer) Prediction(org.kie.kogito.explainability.model.Prediction) Saliency(org.kie.kogito.explainability.model.Saliency) PredictionProvider(org.kie.kogito.explainability.model.PredictionProvider) LimeConfig(org.kie.kogito.explainability.local.lime.LimeConfig) Random(java.util.Random) PredictionInputsDataDistribution(org.kie.kogito.explainability.model.PredictionInputsDataDistribution) DataDistribution(org.kie.kogito.explainability.model.DataDistribution) PredictionOutput(org.kie.kogito.explainability.model.PredictionOutput) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) List(java.util.List) ValueSource(org.junit.jupiter.params.provider.ValueSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
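
Here the distribution comes from getSamples(tokenizer), i.e. real tokenized text inputs rather than perturbed copies of a single input, and the stability thresholds are raised to 0.6 over the top 2 features, a stricter bar that text inputs with one clearly dominant language can plausibly sustain.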

Example 9 with PredictionInputsDataDistribution

Use of org.kie.kogito.explainability.model.PredictionInputsDataDistribution in project kogito-apps by kiegroup.

From the class PmmlCompoundScorecardLimeExplainerTest, method testPMMLCompoundScorecard:

@Test
void testPMMLCompoundScorecard() throws Exception {
    Random random = new Random();
    LimeConfig limeConfig = new LimeConfig().withSamples(10).withPerturbationContext(new PerturbationContext(0L, random, 1));
    LimeExplainer limeExplainer = new LimeExplainer(limeConfig);
    PredictionInput input = getTestInput();
    PredictionProvider model = getModel();
    List<PredictionOutput> predictionOutputs = model.predictAsync(List.of(input)).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    assertThat(predictionOutputs).isNotNull();
    assertThat(predictionOutputs).isNotEmpty();
    PredictionOutput output = predictionOutputs.get(0);
    assertThat(output).isNotNull();
    Prediction prediction = new SimplePrediction(input, output);
    Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    for (Saliency saliency : saliencyMap.values()) {
        assertThat(saliency).isNotNull();
        double v = ExplainabilityMetrics.impactScore(model, prediction, saliency.getTopFeatures(2));
        assertThat(v).isEqualTo(1d);
    }
    assertDoesNotThrow(() -> ValidationUtils.validateLocalSaliencyStability(model, prediction, limeExplainer, 1, 0.5, 0.5));
    // The distribution here is built from pre-made sample inputs rather than perturbations.
    List<PredictionInput> inputs = getSamples();
    DataDistribution distribution = new PredictionInputsDataDistribution(inputs);
    String decision = "score";
    int k = 1;
    int chunkSize = 2;
    double f1 = ExplainabilityMetrics.getLocalSaliencyF1(decision, model, limeExplainer, distribution, k, chunkSize);
    AssertionsForClassTypes.assertThat(f1).isBetween(0d, 1d);
}
Also used: SimplePrediction(org.kie.kogito.explainability.model.SimplePrediction) PerturbationContext(org.kie.kogito.explainability.model.PerturbationContext) PredictionInput(org.kie.kogito.explainability.model.PredictionInput) LimeExplainer(org.kie.kogito.explainability.local.lime.LimeExplainer) Prediction(org.kie.kogito.explainability.model.Prediction) Saliency(org.kie.kogito.explainability.model.Saliency) PredictionProvider(org.kie.kogito.explainability.model.PredictionProvider) LimeConfig(org.kie.kogito.explainability.local.lime.LimeConfig) Random(java.util.Random) PredictionInputsDataDistribution(org.kie.kogito.explainability.model.PredictionInputsDataDistribution) DataDistribution(org.kie.kogito.explainability.model.DataDistribution) PredictionOutput(org.kie.kogito.explainability.model.PredictionOutput) Test(org.junit.jupiter.api.Test)
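
Note that the final assertion only requires the F1 to land in the valid [0, 1] range, so for the compound scorecard this metric check is effectively a smoke test rather than a quality bar.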

Example 10 with PredictionInputsDataDistribution

Use of org.kie.kogito.explainability.model.PredictionInputsDataDistribution in project kogito-apps by kiegroup.

From the class DummyModelsLimeExplainerTest, method testUnusedFeatureClassification:

@ParameterizedTest
@ValueSource(longs = { 0 })
void testUnusedFeatureClassification(long seed) throws Exception {
    Random random = new Random();
    int idx = 2;
    List<Feature> features = new LinkedList<>();
    features.add(FeatureFactory.newNumericalFeature("f1", 6));
    features.add(FeatureFactory.newNumericalFeature("f2", 3));
    features.add(FeatureFactory.newNumericalFeature("f3", 5));
    PredictionProvider model = TestUtils.getEvenSumModel(idx);
    PredictionInput input = new PredictionInput(features);
    List<PredictionOutput> outputs = model.predictAsync(List.of(input)).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    Prediction prediction = new SimplePrediction(input, outputs.get(0));
    LimeConfig limeConfig = new LimeConfig().withSamples(100).withPerturbationContext(new PerturbationContext(seed, random, 1));
    LimeExplainer limeExplainer = new LimeExplainer(limeConfig);
    Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    for (Saliency saliency : saliencyMap.values()) {
        assertNotNull(saliency);
        List<FeatureImportance> topFeatures = saliency.getTopFeatures(3);
        assertEquals(3, topFeatures.size());
        assertEquals(1d, ExplainabilityMetrics.impactScore(model, prediction, topFeatures));
    }
    int topK = 1;
    double minimumPositiveStabilityRate = 0.5;
    double minimumNegativeStabilityRate = 0.5;
    TestUtils.assertLimeStability(model, prediction, limeExplainer, topK, minimumPositiveStabilityRate, minimumNegativeStabilityRate);
    // Distribution of 100 random three-feature inputs, used to score precision/recall/F1 of the local saliencies.
    List<PredictionInput> inputs = new ArrayList<>();
    for (int i = 0; i < 100; i++) {
        List<Feature> fs = new LinkedList<>();
        fs.add(TestUtils.getMockedNumericFeature());
        fs.add(TestUtils.getMockedNumericFeature());
        fs.add(TestUtils.getMockedNumericFeature());
        inputs.add(new PredictionInput(fs));
    }
    DataDistribution distribution = new PredictionInputsDataDistribution(inputs);
    int k = 2;
    int chunkSize = 10;
    String decision = "sum-even-but" + idx;
    double precision = ExplainabilityMetrics.getLocalSaliencyPrecision(decision, model, limeExplainer, distribution, k, chunkSize);
    assertThat(precision).isEqualTo(1);
    double recall = ExplainabilityMetrics.getLocalSaliencyRecall(decision, model, limeExplainer, distribution, k, chunkSize);
    assertThat(recall).isEqualTo(1);
    double f1 = ExplainabilityMetrics.getLocalSaliencyF1(decision, model, limeExplainer, distribution, k, chunkSize);
    assertThat(f1).isEqualTo(1);
}
Also used: SimplePrediction(org.kie.kogito.explainability.model.SimplePrediction) PerturbationContext(org.kie.kogito.explainability.model.PerturbationContext) PredictionInput(org.kie.kogito.explainability.model.PredictionInput) Prediction(org.kie.kogito.explainability.model.Prediction) ArrayList(java.util.ArrayList) Saliency(org.kie.kogito.explainability.model.Saliency) PredictionProvider(org.kie.kogito.explainability.model.PredictionProvider) Feature(org.kie.kogito.explainability.model.Feature) LinkedList(java.util.LinkedList) Random(java.util.Random) FeatureImportance(org.kie.kogito.explainability.model.FeatureImportance) PredictionInputsDataDistribution(org.kie.kogito.explainability.model.PredictionInputsDataDistribution) DataDistribution(org.kie.kogito.explainability.model.DataDistribution) PredictionOutput(org.kie.kogito.explainability.model.PredictionOutput) ValueSource(org.junit.jupiter.params.provider.ValueSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
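
With precision and recall both pinned at 1.0, an F1 of exactly 1.0 follows from the standard definition f1 = 2 * precision * recall / (precision + recall): here 2 * 1 * 1 / (1 + 1) = 1, so the three assertions are mutually consistent.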

Aggregations

PredictionInputsDataDistribution (org.kie.kogito.explainability.model.PredictionInputsDataDistribution): 22
PredictionInput (org.kie.kogito.explainability.model.PredictionInput): 21
PerturbationContext (org.kie.kogito.explainability.model.PerturbationContext): 20
Prediction (org.kie.kogito.explainability.model.Prediction): 20
PredictionProvider (org.kie.kogito.explainability.model.PredictionProvider): 20
Random (java.util.Random): 19
DataDistribution (org.kie.kogito.explainability.model.DataDistribution): 19
PredictionOutput (org.kie.kogito.explainability.model.PredictionOutput): 19
SimplePrediction (org.kie.kogito.explainability.model.SimplePrediction): 19
Saliency (org.kie.kogito.explainability.model.Saliency): 18
ArrayList (java.util.ArrayList): 16
LimeConfig (org.kie.kogito.explainability.local.lime.LimeConfig): 13
LimeExplainer (org.kie.kogito.explainability.local.lime.LimeExplainer): 13
Test (org.junit.jupiter.api.Test): 12
Feature (org.kie.kogito.explainability.model.Feature): 12
FeatureImportance (org.kie.kogito.explainability.model.FeatureImportance): 10
LinkedList (java.util.LinkedList): 8
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 7
ValueSource (org.junit.jupiter.params.provider.ValueSource): 7
HashMap (java.util.HashMap): 5
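
For readers who want to try the class outside of these tests, below is a minimal, self-contained sketch of building a PredictionInputsDataDistribution from synthetic numeric features and drawing from it. The sample() and getAllSamples() calls are assumptions taken from the DataDistribution interface (the examples above only pass the distribution to the metrics), and the feature names f1/f2 are illustrative.

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

import org.kie.kogito.explainability.model.DataDistribution;
import org.kie.kogito.explainability.model.Feature;
import org.kie.kogito.explainability.model.FeatureFactory;
import org.kie.kogito.explainability.model.PredictionInput;
import org.kie.kogito.explainability.model.PredictionInputsDataDistribution;

public class DistributionSketch {

    public static void main(String[] args) {
        Random random = new Random(0L);
        List<PredictionInput> inputs = new ArrayList<>();
        // Ten synthetic inputs, each with two numeric features (names are illustrative).
        for (int i = 0; i < 10; i++) {
            List<Feature> features = List.of(
                    FeatureFactory.newNumericalFeature("f1", random.nextDouble()),
                    FeatureFactory.newNumericalFeature("f2", random.nextDouble()));
            inputs.add(new PredictionInput(features));
        }
        DataDistribution distribution = new PredictionInputsDataDistribution(inputs);
        // sample() and getAllSamples() are assumed from the DataDistribution contract.
        PredictionInput one = distribution.sample();
        List<PredictionInput> all = distribution.getAllSamples();
        System.out.println("sampled " + one.getFeatures().size() + " features; backing list has " + all.size() + " inputs");
    }
}

Constructing the distribution from perturbed or sampled PredictionInput lists, as every example above does, is what lets ExplainabilityMetrics draw fresh inputs when scoring the explainer.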