
Example 46 with PredictionOutput

Use of org.kie.kogito.explainability.model.PredictionOutput in the project kogito-apps by kiegroup.

From the class OpenNLPLimeExplainerTest, method getModel().

private PredictionProvider getModel() throws IOException {
    InputStream is = getClass().getResourceAsStream("/opennlp/langdetect-183.bin");
    LanguageDetectorModel languageDetectorModel = new LanguageDetectorModel(is);
    LanguageDetector languageDetector = new LanguageDetectorME(languageDetectorModel);
    return inputs -> CompletableFuture.supplyAsync(() -> {
        List<PredictionOutput> results = new LinkedList<>();
        for (PredictionInput predictionInput : inputs) {
            // concatenate all text features of the input into a single space-separated string
            StringBuilder builder = new StringBuilder();
            for (Feature f : predictionInput.getFeatures()) {
                if (builder.length() > 0) {
                    builder.append(' ');
                }
                builder.append(f.getValue().asString());
            }
            // run OpenNLP language detection and expose the result as a single "lang" output,
            // using the detector's confidence as the output score
            Language language = languageDetector.predictLanguage(builder.toString());
            PredictionOutput predictionOutput = new PredictionOutput(List.of(new Output("lang", Type.TEXT, new Value(language.getLang()), language.getConfidence())));
            results.add(predictionOutput);
        }
        return results;
    });
}
Also used: FeatureFactory(org.kie.kogito.explainability.model.FeatureFactory) Assertions.assertNotNull(org.junit.jupiter.api.Assertions.assertNotNull) Arrays(java.util.Arrays) PredictionInputsDataDistribution(org.kie.kogito.explainability.model.PredictionInputsDataDistribution) LanguageDetectorModel(opennlp.tools.langdetect.LanguageDetectorModel) PerturbationContext(org.kie.kogito.explainability.model.PerturbationContext) Feature(org.kie.kogito.explainability.model.Feature) Prediction(org.kie.kogito.explainability.model.Prediction) TimeoutException(java.util.concurrent.TimeoutException) Random(java.util.Random) CompletableFuture(java.util.concurrent.CompletableFuture) Value(org.kie.kogito.explainability.model.Value) Function(java.util.function.Function) DataDistribution(org.kie.kogito.explainability.model.DataDistribution) Saliency(org.kie.kogito.explainability.model.Saliency) ArrayList(java.util.ArrayList) LanguageDetector(opennlp.tools.langdetect.LanguageDetector) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) Map(java.util.Map) Assertions(org.assertj.core.api.Assertions) LimeConfig(org.kie.kogito.explainability.local.lime.LimeConfig) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) LinkedList(java.util.LinkedList) AssertionsForClassTypes.assertThat(org.assertj.core.api.AssertionsForClassTypes.assertThat) PredictionOutput(org.kie.kogito.explainability.model.PredictionOutput) LimeConfigOptimizer(org.kie.kogito.explainability.local.lime.optim.LimeConfigOptimizer) ValueSource(org.junit.jupiter.params.provider.ValueSource) LanguageDetectorME(opennlp.tools.langdetect.LanguageDetectorME) SimplePrediction(org.kie.kogito.explainability.model.SimplePrediction) LimeExplainer(org.kie.kogito.explainability.local.lime.LimeExplainer) DataUtils(org.kie.kogito.explainability.utils.DataUtils) IOException(java.io.IOException) Type(org.kie.kogito.explainability.model.Type) PredictionProvider(org.kie.kogito.explainability.model.PredictionProvider) ExecutionException(java.util.concurrent.ExecutionException) Test(org.junit.jupiter.api.Test) PredictionInput(org.kie.kogito.explainability.model.PredictionInput) List(java.util.List) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Language(opennlp.tools.langdetect.Language) ExplainabilityMetrics(org.kie.kogito.explainability.utils.ExplainabilityMetrics) Output(org.kie.kogito.explainability.model.Output) ValidationUtils(org.kie.kogito.explainability.utils.ValidationUtils) Config(org.kie.kogito.explainability.Config) Assertions.assertDoesNotThrow(org.junit.jupiter.api.Assertions.assertDoesNotThrow) InputStream(java.io.InputStream)
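For orientation, a minimal sketch of how the provider built by getModel() might be invoked. The feature name "text" and the sample sentence are illustrative only; the real test builds its input via getTestInput(tokenizer), shown in the next example.

// illustrative usage only; assumes it runs inside a test method that declares throws Exception
PredictionProvider model = getModel();
PredictionInput input = new PredictionInput(List.of(FeatureFactory.newTextFeature("text", "ciao mondo")));
List<PredictionOutput> outputs = model.predictAsync(List.of(input)).get();
Output lang = outputs.get(0).getOutputs().get(0);
// prints the detected language code (e.g. "ita") and the detector's confidence
System.out.println(lang.getValue().asString() + " / " + lang.getScore());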

Example 47 with PredictionOutput

Use of org.kie.kogito.explainability.model.PredictionOutput in the project kogito-apps by kiegroup.

From the class OpenNLPLimeExplainerTest, method testOpenNLPLangDetect().

@ParameterizedTest
@ValueSource(longs = { 0 })
void testOpenNLPLangDetect(long seed) throws Exception {
    Random random = new Random();
    LimeConfig limeConfig = new LimeConfig().withSamples(10).withPerturbationContext(new PerturbationContext(seed, random, 1));
    LimeExplainer limeExplainer = new LimeExplainer(limeConfig);
    PredictionProvider model = getModel();
    Function<String, List<String>> tokenizer = getTokenizer();
    PredictionInput testInput = getTestInput(tokenizer);
    List<PredictionOutput> predictionOutputs = model.predictAsync(List.of(testInput)).get();
    assertNotNull(predictionOutputs);
    assertFalse(predictionOutputs.isEmpty());
    PredictionOutput output = predictionOutputs.get(0);
    assertNotNull(output);
    assertNotNull(output.getOutputs());
    assertEquals(1, output.getOutputs().size());
    assertEquals("ita", output.getOutputs().get(0).getValue().asString());
    assertEquals(0.03, output.getOutputs().get(0).getScore(), 1e-2);
    Prediction prediction = new SimplePrediction(testInput, output);
    Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    for (Saliency saliency : saliencyMap.values()) {
        assertNotNull(saliency);
        double i1 = ExplainabilityMetrics.impactScore(model, prediction, saliency.getPositiveFeatures(3));
        assertEquals(1d, i1);
    }
    assertDoesNotThrow(() -> ValidationUtils.validateLocalSaliencyStability(model, prediction, limeExplainer, 2, 0.6, 0.6));
    List<PredictionInput> inputs = getSamples(tokenizer);
    String decision = "lang";
    DataDistribution distribution = new PredictionInputsDataDistribution(inputs);
    int k = 2;
    int chunkSize = 2;
    double f1 = ExplainabilityMetrics.getLocalSaliencyF1(decision, model, limeExplainer, distribution, k, chunkSize);
    assertThat(f1).isBetween(0.5d, 1d);
}
Also used: SimplePrediction(org.kie.kogito.explainability.model.SimplePrediction) PerturbationContext(org.kie.kogito.explainability.model.PerturbationContext) PredictionInput(org.kie.kogito.explainability.model.PredictionInput) LimeExplainer(org.kie.kogito.explainability.local.lime.LimeExplainer) Prediction(org.kie.kogito.explainability.model.Prediction) Saliency(org.kie.kogito.explainability.model.Saliency) PredictionProvider(org.kie.kogito.explainability.model.PredictionProvider) LimeConfig(org.kie.kogito.explainability.local.lime.LimeConfig) Random(java.util.Random) PredictionInputsDataDistribution(org.kie.kogito.explainability.model.PredictionInputsDataDistribution) DataDistribution(org.kie.kogito.explainability.model.DataDistribution) PredictionOutput(org.kie.kogito.explainability.model.PredictionOutput) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) List(java.util.List) ValueSource(org.junit.jupiter.params.provider.ValueSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
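The helpers getTokenizer(), getTestInput(tokenizer) and getSamples(tokenizer) used above are not reproduced in this listing. A rough, hypothetical sketch of how a comparable test input could be built, assuming a plain whitespace tokenizer and one text feature per token:

// hypothetical stand-ins; the actual helpers in OpenNLPLimeExplainerTest may differ
Function<String, List<String>> tokenizer = s -> Arrays.asList(s.split("\\s+"));
String text = "questa è una frase di esempio in italiano"; // illustrative Italian sentence
List<Feature> features = new ArrayList<>();
int i = 0;
for (String token : tokenizer.apply(text)) {
    features.add(FeatureFactory.newTextFeature("w" + i++, token));
}
PredictionInput testInput = new PredictionInput(features);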

Example 48 with PredictionOutput

Use of org.kie.kogito.explainability.model.PredictionOutput in the project kogito-apps by kiegroup.

From the class OpenNLPLimeExplainerTest, method testExplanationWeightedStabilityWithOptimization().

@Test
void testExplanationWeightedStabilityWithOptimization() throws ExecutionException, InterruptedException, TimeoutException, IOException {
    PredictionProvider model = getModel();
    List<PredictionInput> samples = getSamples(getTokenizer());
    List<PredictionOutput> predictionOutputs = model.predictAsync(samples.subList(0, 5)).get();
    List<Prediction> predictions = DataUtils.getPredictions(samples, predictionOutputs);
    long seed = 0;
    LimeConfigOptimizer limeConfigOptimizer = new LimeConfigOptimizer().withDeterministicExecution(true).withSampling(false).withStepCountLimit(30).withWeightedStability(0.4, 0.6);
    Random random = new Random();
    LimeConfig limeConfig = new LimeConfig().withSamples(10).withPerturbationContext(new PerturbationContext(seed, random, 1));
    LimeConfig optimizedConfig = limeConfigOptimizer.optimize(limeConfig, predictions, model);
    Assertions.assertThat(optimizedConfig).isNotSameAs(limeConfig);
    LimeExplainer limeExplainer = new LimeExplainer(optimizedConfig);
    PredictionInput testPredictionInput = getTestInput(getTokenizer());
    List<PredictionOutput> testPredictionOutputs = model.predictAsync(List.of(testPredictionInput)).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    Prediction instance = new SimplePrediction(testPredictionInput, testPredictionOutputs.get(0));
    assertDoesNotThrow(() -> ValidationUtils.validateLocalSaliencyStability(model, instance, limeExplainer, 1, 0.9, 0.8));
}
Also used: SimplePrediction(org.kie.kogito.explainability.model.SimplePrediction) PerturbationContext(org.kie.kogito.explainability.model.PerturbationContext) PredictionInput(org.kie.kogito.explainability.model.PredictionInput) LimeExplainer(org.kie.kogito.explainability.local.lime.LimeExplainer) Prediction(org.kie.kogito.explainability.model.Prediction) PredictionProvider(org.kie.kogito.explainability.model.PredictionProvider) LimeConfig(org.kie.kogito.explainability.local.lime.LimeConfig) Random(java.util.Random) PredictionOutput(org.kie.kogito.explainability.model.PredictionOutput) LimeConfigOptimizer(org.kie.kogito.explainability.local.lime.optim.LimeConfigOptimizer) Test(org.junit.jupiter.api.Test) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
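The optimized configuration is consumed exactly like a hand-built one. A brief sketch, mirroring the explainAsync call from Example 47 and reusing the model, instance and optimizedConfig variables above:

LimeExplainer tunedExplainer = new LimeExplainer(optimizedConfig);
Map<String, Saliency> saliencies = tunedExplainer.explainAsync(instance, model)
        .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
// one Saliency per decision; here the model exposes the single "lang" output
saliencies.forEach((outputName, saliency) ->
        System.out.println(outputName + " -> " + saliency.getPositiveFeatures(3)));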

Example 49 with PredictionOutput

Use of org.kie.kogito.explainability.model.PredictionOutput in the project kogito-apps by kiegroup.

From the class OpenNLPLimeExplainerTest, method testExplanationStabilityWithOptimization().

@Test
void testExplanationStabilityWithOptimization() throws ExecutionException, InterruptedException, TimeoutException, IOException {
    PredictionProvider model = getModel();
    List<PredictionInput> samples = getSamples(getTokenizer());
    List<PredictionOutput> predictionOutputs = model.predictAsync(samples.subList(0, 5)).get();
    List<Prediction> predictions = DataUtils.getPredictions(samples, predictionOutputs);
    long seed = 0;
    LimeConfigOptimizer limeConfigOptimizer = new LimeConfigOptimizer().withDeterministicExecution(true).withSampling(false).withStepCountLimit(30);
    Random random = new Random();
    LimeConfig limeConfig = new LimeConfig().withSamples(10).withPerturbationContext(new PerturbationContext(seed, random, 1));
    LimeConfig optimizedConfig = limeConfigOptimizer.optimize(limeConfig, predictions, model);
    Assertions.assertThat(optimizedConfig).isNotSameAs(limeConfig);
    LimeExplainer limeExplainer = new LimeExplainer(optimizedConfig);
    PredictionInput testPredictionInput = getTestInput(getTokenizer());
    List<PredictionOutput> testPredictionOutputs = model.predictAsync(List.of(testPredictionInput)).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    Prediction instance = new SimplePrediction(testPredictionInput, testPredictionOutputs.get(0));
    assertDoesNotThrow(() -> ValidationUtils.validateLocalSaliencyStability(model, instance, limeExplainer, 1, 0.8, 0.8));
}
Also used: SimplePrediction(org.kie.kogito.explainability.model.SimplePrediction) PerturbationContext(org.kie.kogito.explainability.model.PerturbationContext) PredictionInput(org.kie.kogito.explainability.model.PredictionInput) LimeExplainer(org.kie.kogito.explainability.local.lime.LimeExplainer) Prediction(org.kie.kogito.explainability.model.Prediction) PredictionProvider(org.kie.kogito.explainability.model.PredictionProvider) LimeConfig(org.kie.kogito.explainability.local.lime.LimeConfig) Random(java.util.Random) PredictionOutput(org.kie.kogito.explainability.model.PredictionOutput) LimeConfigOptimizer(org.kie.kogito.explainability.local.lime.optim.LimeConfigOptimizer) Test(org.junit.jupiter.api.Test) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Example 50 with PredictionOutput

Use of org.kie.kogito.explainability.model.PredictionOutput in the project kogito-apps by kiegroup.

From the class OpenNLPLimeExplainerTest, method testExplanationImpactScoreWithOptimization().

@Test
void testExplanationImpactScoreWithOptimization() throws ExecutionException, InterruptedException, IOException {
    PredictionProvider model = getModel();
    List<PredictionInput> samples = getSamples(getTokenizer());
    List<PredictionOutput> predictionOutputs = model.predictAsync(samples.subList(0, 5)).get();
    List<Prediction> predictions = DataUtils.getPredictions(samples, predictionOutputs);
    long seed = 0;
    LimeConfigOptimizer limeConfigOptimizer = new LimeConfigOptimizer().withDeterministicExecution(true).forImpactScore().withSampling(false).withStepCountLimit(30);
    Random random = new Random();
    LimeConfig limeConfig = new LimeConfig().withSamples(10).withPerturbationContext(new PerturbationContext(seed, random, 1));
    LimeConfig optimizedConfig = limeConfigOptimizer.optimize(limeConfig, predictions, model);
    Assertions.assertThat(optimizedConfig).isNotSameAs(limeConfig);
}
Also used: PerturbationContext(org.kie.kogito.explainability.model.PerturbationContext) Random(java.util.Random) PredictionInput(org.kie.kogito.explainability.model.PredictionInput) PredictionOutput(org.kie.kogito.explainability.model.PredictionOutput) Prediction(org.kie.kogito.explainability.model.Prediction) SimplePrediction(org.kie.kogito.explainability.model.SimplePrediction) PredictionProvider(org.kie.kogito.explainability.model.PredictionProvider) LimeConfigOptimizer(org.kie.kogito.explainability.local.lime.optim.LimeConfigOptimizer) LimeConfig(org.kie.kogito.explainability.local.lime.LimeConfig) Test(org.junit.jupiter.api.Test) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
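forImpactScore() steers the optimizer toward the same impact-score metric computed explicitly in Example 47. A hedged sketch of evaluating it for one of the predictions with an explainer built from the optimized config, reusing the model, predictions and optimizedConfig variables above:

LimeExplainer tunedExplainer = new LimeExplainer(optimizedConfig);
Prediction prediction = predictions.get(0);
Map<String, Saliency> saliencyMap = tunedExplainer.explainAsync(prediction, model).get();
for (Saliency saliency : saliencyMap.values()) {
    // impactScore checks whether the prediction changes once the top positive features are dropped
    double impact = ExplainabilityMetrics.impactScore(model, prediction, saliency.getPositiveFeatures(3));
    System.out.println("impact score: " + impact);
}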

Aggregations

PredictionOutput (org.kie.kogito.explainability.model.PredictionOutput): 155
PredictionInput (org.kie.kogito.explainability.model.PredictionInput): 137
PredictionProvider (org.kie.kogito.explainability.model.PredictionProvider): 124
Prediction (org.kie.kogito.explainability.model.Prediction): 122
Random (java.util.Random): 90
Test (org.junit.jupiter.api.Test): 90
SimplePrediction (org.kie.kogito.explainability.model.SimplePrediction): 89
Feature (org.kie.kogito.explainability.model.Feature): 80
ArrayList (java.util.ArrayList): 74
Output (org.kie.kogito.explainability.model.Output): 65
PerturbationContext (org.kie.kogito.explainability.model.PerturbationContext): 65
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 55
LimeConfig (org.kie.kogito.explainability.local.lime.LimeConfig): 52
LimeExplainer (org.kie.kogito.explainability.local.lime.LimeExplainer): 50
Saliency (org.kie.kogito.explainability.model.Saliency): 48
Value (org.kie.kogito.explainability.model.Value): 47
LinkedList (java.util.LinkedList): 37
List (java.util.List): 36
LimeConfigOptimizer (org.kie.kogito.explainability.local.lime.optim.LimeConfigOptimizer): 33
ValueSource (org.junit.jupiter.params.provider.ValueSource): 32