
Example 71 with Feature

Use of org.kie.kogito.explainability.model.Feature in project kogito-apps by kiegroup.

The class DummyDmnModelsLimeExplainerTest, method testFunctional2DMNExplanation:

@Test
void testFunctional2DMNExplanation() throws ExecutionException, InterruptedException, TimeoutException {
    DMNRuntime dmnRuntime = DMNKogito.createGenericDMNRuntime(new InputStreamReader(getClass().getResourceAsStream("/dmn/functionalTest2.dmn")));
    assertThat(dmnRuntime.getModels().size()).isEqualTo(1);
    final String namespace = "https://kiegroup.org/dmn/_049CD980-1310-4B02-9E90-EFC57059F44A";
    final String name = "new-file";
    DecisionModel decisionModel = new DmnDecisionModel(dmnRuntime, namespace, name);
    PredictionProvider model = new DecisionModelWrapper(decisionModel);
    Map<String, Object> context = new HashMap<>();
    context.put("numberInput", 1);
    context.put("notUsedInput", 1);
    List<Feature> features = new ArrayList<>();
    features.add(FeatureFactory.newCompositeFeature("context", context));
    PredictionInput predictionInput = new PredictionInput(features);
    List<PredictionOutput> predictionOutputs = model.predictAsync(List.of(predictionInput)).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    Prediction prediction = new SimplePrediction(predictionInput, predictionOutputs.get(0));
    Random random = new Random();
    PerturbationContext perturbationContext = new PerturbationContext(0L, random, 1);
    LimeConfig limeConfig = new LimeConfig().withSamples(10).withPerturbationContext(perturbationContext);
    LimeExplainer limeExplainer = new LimeExplainer(limeConfig);
    Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    for (Saliency saliency : saliencyMap.values()) {
        assertThat(saliency).isNotNull();
        List<FeatureImportance> topFeatures = saliency.getPositiveFeatures(2);
        assertThat(topFeatures.isEmpty()).isFalse();
        assertThat(topFeatures.get(0).getFeature().getName()).isEqualTo("numberInput");
    }
    assertThatCode(() -> ValidationUtils.validateLocalSaliencyStability(model, prediction, limeExplainer, 1, 0.5, 0.5)).doesNotThrowAnyException();
    String decision = "decision";
    List<PredictionInput> inputs = new ArrayList<>();
    for (int n = 0; n < 10; n++) {
        inputs.add(new PredictionInput(DataUtils.perturbFeatures(features, perturbationContext)));
    }
    DataDistribution distribution = new PredictionInputsDataDistribution(inputs);
    int k = 2;
    int chunkSize = 5;
    double precision = ExplainabilityMetrics.getLocalSaliencyPrecision(decision, model, limeExplainer, distribution, k, chunkSize);
    assertThat(precision).isBetween(0d, 1d);
    double recall = ExplainabilityMetrics.getLocalSaliencyRecall(decision, model, limeExplainer, distribution, k, chunkSize);
    assertThat(recall).isBetween(0d, 1d);
    double f1 = ExplainabilityMetrics.getLocalSaliencyF1(decision, model, limeExplainer, distribution, k, chunkSize);
    assertThat(f1).isBetween(0d, 1d);
}
Also used : SimplePrediction(org.kie.kogito.explainability.model.SimplePrediction) PerturbationContext(org.kie.kogito.explainability.model.PerturbationContext) HashMap(java.util.HashMap) LimeExplainer(org.kie.kogito.explainability.local.lime.LimeExplainer) ArrayList(java.util.ArrayList) DecisionModel(org.kie.kogito.decision.DecisionModel) DmnDecisionModel(org.kie.kogito.dmn.DmnDecisionModel) Saliency(org.kie.kogito.explainability.model.Saliency) Feature(org.kie.kogito.explainability.model.Feature) Random(java.util.Random) PredictionInputsDataDistribution(org.kie.kogito.explainability.model.PredictionInputsDataDistribution) InputStreamReader(java.io.InputStreamReader) PredictionInput(org.kie.kogito.explainability.model.PredictionInput) Prediction(org.kie.kogito.explainability.model.Prediction) PredictionProvider(org.kie.kogito.explainability.model.PredictionProvider) DMNRuntime(org.kie.dmn.api.core.DMNRuntime) LimeConfig(org.kie.kogito.explainability.local.lime.LimeConfig) FeatureImportance(org.kie.kogito.explainability.model.FeatureImportance) DataDistribution(org.kie.kogito.explainability.model.DataDistribution) PredictionOutput(org.kie.kogito.explainability.model.PredictionOutput) Test(org.junit.jupiter.api.Test)
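
The DecisionModelWrapper used in this test is a helper class that is not shown here. Below is a minimal sketch of what such an adapter might look like, not the actual kogito-apps implementation: it assumes the composite "context" feature wraps the List<Feature> produced by FeatureFactory.newCompositeFeature(String, Map), that the DecisionModel API's newContext/evaluateAll methods (org.kie.dmn.api.core.DMNResult) are available, and that every decision result can be reported as a TEXT output. Imports (Collectors, CompletableFuture, the DMN API types) are assumed.

// Hedged sketch of a DecisionModelWrapper-like adapter: turns a Kogito
// DecisionModel into a PredictionProvider that LIME can query.
private static PredictionProvider wrap(DecisionModel decisionModel) {
    return inputs -> CompletableFuture.supplyAsync(() -> {
        List<PredictionOutput> predictionOutputs = new ArrayList<>();
        for (PredictionInput input : inputs) {
            // Rebuild the DMN context variables from the composite "context" feature;
            // assumes its underlying value is the List<Feature> built by
            // FeatureFactory.newCompositeFeature(String, Map).
            Map<String, Object> contextVariables = new HashMap<>();
            for (Feature composite : input.getFeatures()) {
                @SuppressWarnings("unchecked")
                List<Feature> nested = (List<Feature>) composite.getValue().getUnderlyingObject();
                for (Feature f : nested) {
                    contextVariables.put(f.getName(), f.getValue().getUnderlyingObject());
                }
            }
            DMNResult dmnResult = decisionModel.evaluateAll(decisionModel.newContext(contextVariables));
            // Report every evaluated decision as a TEXT output with full confidence.
            List<Output> outputs = dmnResult.getDecisionResults().stream()
                    .map(dr -> new Output(dr.getDecisionName(), Type.TEXT,
                            new Value(String.valueOf(dr.getResult())), 1d))
                    .collect(Collectors.toList());
            predictionOutputs.add(new PredictionOutput(outputs));
        }
        return predictionOutputs;
    });
}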

Example 72 with Feature

Use of org.kie.kogito.explainability.model.Feature in project kogito-apps by kiegroup.

The class LoanEligibilityDmnCounterfactualExplainerTest, method testLoanEligibilityDMNExplanation:

@Test
void testLoanEligibilityDMNExplanation() throws ExecutionException, InterruptedException, TimeoutException {
    PredictionProvider model = getModel();
    final List<Output> goal = List.of(new Output("Is Enought?", Type.NUMBER, new Value(100), 0.0d), new Output("Eligibility", Type.TEXT, new Value("No"), 0.0d), new Output("Decide", Type.BOOLEAN, new Value(true), 0.0d));
    final TerminationConfig terminationConfig = new TerminationConfig().withScoreCalculationCountLimit(steps);
    final SolverConfig solverConfig = SolverConfigBuilder.builder().withTerminationConfig(terminationConfig).build();
    solverConfig.setRandomSeed(randomSeed);
    solverConfig.setEnvironmentMode(EnvironmentMode.REPRODUCIBLE);
    CounterfactualConfig config = new CounterfactualConfig();
    config.withSolverConfig(solverConfig);
    final CounterfactualExplainer explainer = new CounterfactualExplainer(config);
    PredictionInput input = getTestInput();
    PredictionOutput output = new PredictionOutput(goal);
    // test model
    List<PredictionOutput> predictionOutputs = model.predictAsync(List.of(input)).get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    Prediction prediction = new CounterfactualPrediction(input, output, null, UUID.randomUUID(), null);
    CounterfactualResult counterfactualResult = explainer.explainAsync(prediction, model).get();
    List<Feature> cfFeatures = counterfactualResult.getEntities().stream().map(CounterfactualEntity::asFeature).collect(Collectors.toList());
    List<Feature> unflattened = CompositeFeatureUtils.unflattenFeatures(cfFeatures, input.getFeatures());
    List<PredictionOutput> outputs = model.predictAsync(List.of(new PredictionInput(unflattened))).get();
    assertTrue(counterfactualResult.isValid());
    final Output decideOutput = outputs.get(0).getOutputs().get(2);
    assertEquals("Decide", decideOutput.getName());
    assertTrue((Boolean) decideOutput.getValue().getUnderlyingObject());
}
Also used : PredictionInput(org.kie.kogito.explainability.model.PredictionInput) Prediction(org.kie.kogito.explainability.model.Prediction) CounterfactualPrediction(org.kie.kogito.explainability.model.CounterfactualPrediction) PredictionProvider(org.kie.kogito.explainability.model.PredictionProvider) Feature(org.kie.kogito.explainability.model.Feature) CounterfactualResult(org.kie.kogito.explainability.local.counterfactual.CounterfactualResult) TerminationConfig(org.optaplanner.core.config.solver.termination.TerminationConfig) PredictionOutput(org.kie.kogito.explainability.model.PredictionOutput) Output(org.kie.kogito.explainability.model.Output) CounterfactualConfig(org.kie.kogito.explainability.local.counterfactual.CounterfactualConfig) Value(org.kie.kogito.explainability.model.Value) CounterfactualExplainer(org.kie.kogito.explainability.local.counterfactual.CounterfactualExplainer) SolverConfig(org.optaplanner.core.config.solver.SolverConfig) Test(org.junit.jupiter.api.Test)
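
The helpers getModel(), getTestInput(), steps and randomSeed belong to the test class and are not shown above. As a follow-up to the assertions, here is a small hedged sketch of how the individual counterfactual changes could be inspected, assuming CounterfactualEntity (org.kie.kogito.explainability.local.counterfactual.entities) exposes isChanged() alongside asFeature():

// Hedged sketch: list which features the counterfactual search actually changed.
// Assumes CounterfactualEntity#isChanged() is available on the entities returned
// by counterfactualResult.getEntities().
for (CounterfactualEntity entity : counterfactualResult.getEntities()) {
    Feature feature = entity.asFeature();
    if (entity.isChanged()) {
        System.out.printf("changed: %s -> %s%n", feature.getName(), feature.getValue());
    }
}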

Example 73 with Feature

Use of org.kie.kogito.explainability.model.Feature in project kogito-apps by kiegroup.

The class OpenNLPLimeExplainerTest, method getModel:

private PredictionProvider getModel() throws IOException {
    InputStream is = getClass().getResourceAsStream("/opennlp/langdetect-183.bin");
    LanguageDetectorModel languageDetectorModel = new LanguageDetectorModel(is);
    LanguageDetector languageDetector = new LanguageDetectorME(languageDetectorModel);
    return inputs -> CompletableFuture.supplyAsync(() -> {
        List<PredictionOutput> results = new LinkedList<>();
        for (PredictionInput predictionInput : inputs) {
            StringBuilder builder = new StringBuilder();
            for (Feature f : predictionInput.getFeatures()) {
                if (builder.length() > 0) {
                    builder.append(' ');
                }
                builder.append(f.getValue().asString());
            }
            Language language = languageDetector.predictLanguage(builder.toString());
            PredictionOutput predictionOutput = new PredictionOutput(List.of(new Output("lang", Type.TEXT, new Value(language.getLang()), language.getConfidence())));
            results.add(predictionOutput);
        }
        return results;
    });
}
Also used : FeatureFactory(org.kie.kogito.explainability.model.FeatureFactory) Assertions.assertNotNull(org.junit.jupiter.api.Assertions.assertNotNull) Arrays(java.util.Arrays) PredictionInputsDataDistribution(org.kie.kogito.explainability.model.PredictionInputsDataDistribution) LanguageDetectorModel(opennlp.tools.langdetect.LanguageDetectorModel) PerturbationContext(org.kie.kogito.explainability.model.PerturbationContext) Feature(org.kie.kogito.explainability.model.Feature) Prediction(org.kie.kogito.explainability.model.Prediction) TimeoutException(java.util.concurrent.TimeoutException) Random(java.util.Random) CompletableFuture(java.util.concurrent.CompletableFuture) Value(org.kie.kogito.explainability.model.Value) Function(java.util.function.Function) DataDistribution(org.kie.kogito.explainability.model.DataDistribution) Saliency(org.kie.kogito.explainability.model.Saliency) ArrayList(java.util.ArrayList) LanguageDetector(opennlp.tools.langdetect.LanguageDetector) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) Map(java.util.Map) Assertions(org.assertj.core.api.Assertions) LimeConfig(org.kie.kogito.explainability.local.lime.LimeConfig) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) LinkedList(java.util.LinkedList) AssertionsForClassTypes.assertThat(org.assertj.core.api.AssertionsForClassTypes.assertThat) PredictionOutput(org.kie.kogito.explainability.model.PredictionOutput) LimeConfigOptimizer(org.kie.kogito.explainability.local.lime.optim.LimeConfigOptimizer) ValueSource(org.junit.jupiter.params.provider.ValueSource) LanguageDetectorME(opennlp.tools.langdetect.LanguageDetectorME) SimplePrediction(org.kie.kogito.explainability.model.SimplePrediction) LimeExplainer(org.kie.kogito.explainability.local.lime.LimeExplainer) DataUtils(org.kie.kogito.explainability.utils.DataUtils) IOException(java.io.IOException) Type(org.kie.kogito.explainability.model.Type) PredictionProvider(org.kie.kogito.explainability.model.PredictionProvider) ExecutionException(java.util.concurrent.ExecutionException) Test(org.junit.jupiter.api.Test) PredictionInput(org.kie.kogito.explainability.model.PredictionInput) List(java.util.List) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Language(opennlp.tools.langdetect.Language) ExplainabilityMetrics(org.kie.kogito.explainability.utils.ExplainabilityMetrics) Output(org.kie.kogito.explainability.model.Output) ValidationUtils(org.kie.kogito.explainability.utils.ValidationUtils) Config(org.kie.kogito.explainability.Config) Assertions.assertDoesNotThrow(org.junit.jupiter.api.Assertions.assertDoesNotThrow) InputStream(java.io.InputStream)
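
The OpenNLP-backed provider above can be plugged into LIME exactly like the DMN-backed models in the earlier examples. A minimal, hedged usage sketch follows; the sample text, sample count and seed are illustrative, not taken from the original test:

// Hedged usage sketch for the OpenNLP-backed PredictionProvider above.
PredictionProvider model = getModel();
List<Feature> features = new ArrayList<>();
features.add(FeatureFactory.newFulltextFeature("text", "italiani, spaghetti pizza mandolino"));
PredictionInput input = new PredictionInput(features);
PredictionOutput output = model.predictAsync(List.of(input))
        .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit()).get(0);
Prediction prediction = new SimplePrediction(input, output);
LimeConfig limeConfig = new LimeConfig()
        .withSamples(100)
        .withPerturbationContext(new PerturbationContext(0L, new Random(), 1));
LimeExplainer limeExplainer = new LimeExplainer(limeConfig);
Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model)
        .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
// One saliency per output of the model (here the single "lang" output).
saliencyMap.values().forEach(Assertions::assertNotNull);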

Example 74 with Feature

Use of org.kie.kogito.explainability.model.Feature in project kogito-apps by kiegroup.

The class OpenNLPPDPExplainerTest, method testOpenNLPLangDetect:

@Test
void testOpenNLPLangDetect() throws Exception {
    PartialDependencePlotExplainer partialDependencePlotExplainer = new PartialDependencePlotExplainer();
    InputStream is = getClass().getResourceAsStream("/opennlp/langdetect-183.bin");
    LanguageDetectorModel languageDetectorModel = new LanguageDetectorModel(is);
    LanguageDetector languageDetector = new LanguageDetectorME(languageDetectorModel);
    PredictionProvider model = inputs -> CompletableFuture.supplyAsync(() -> {
        List<PredictionOutput> results = new ArrayList<>();
        for (PredictionInput predictionInput : inputs) {
            StringBuilder builder = new StringBuilder();
            for (Feature f : predictionInput.getFeatures()) {
                if (builder.length() > 0) {
                    builder.append(' ');
                }
                builder.append(f.getValue().asString());
            }
            Language language = languageDetector.predictLanguage(builder.toString());
            PredictionOutput predictionOutput = new PredictionOutput(List.of(new Output("lang", Type.TEXT, new Value(language.getLang()), language.getConfidence())));
            results.add(predictionOutput);
        }
        return results;
    });
    List<String> texts = List.of("we want your money", "please reply quickly", "you are the lucky winner", "italiani, spaghetti pizza mandolino", "guten tag", "allez les bleus", "daje roma");
    List<Prediction> predictions = new ArrayList<>();
    for (String text : texts) {
        List<Feature> features = new ArrayList<>();
        features.add(FeatureFactory.newFulltextFeature("text", text));
        PredictionInput predictionInput = new PredictionInput(features);
        PredictionOutput predictionOutput = model.predictAsync(List.of(predictionInput)).get().get(0);
        predictions.add(new SimplePrediction(predictionInput, predictionOutput));
    }
    List<PartialDependenceGraph> pdps = partialDependencePlotExplainer.explainFromPredictions(model, predictions);
    assertThat(pdps).isNotEmpty();
}
Also used : LanguageDetectorME(opennlp.tools.langdetect.LanguageDetectorME) FeatureFactory(org.kie.kogito.explainability.model.FeatureFactory) SimplePrediction(org.kie.kogito.explainability.model.SimplePrediction) LanguageDetectorModel(opennlp.tools.langdetect.LanguageDetectorModel) Feature(org.kie.kogito.explainability.model.Feature) Prediction(org.kie.kogito.explainability.model.Prediction) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) CompletableFuture(java.util.concurrent.CompletableFuture) Value(org.kie.kogito.explainability.model.Value) Type(org.kie.kogito.explainability.model.Type) PredictionProvider(org.kie.kogito.explainability.model.PredictionProvider) ArrayList(java.util.ArrayList) Test(org.junit.jupiter.api.Test) PredictionInput(org.kie.kogito.explainability.model.PredictionInput) List(java.util.List) PartialDependencePlotExplainer(org.kie.kogito.explainability.global.pdp.PartialDependencePlotExplainer) Language(opennlp.tools.langdetect.Language) LanguageDetector(opennlp.tools.langdetect.LanguageDetector) PartialDependenceGraph(org.kie.kogito.explainability.model.PartialDependenceGraph) Output(org.kie.kogito.explainability.model.Output) PredictionOutput(org.kie.kogito.explainability.model.PredictionOutput) InputStream(java.io.InputStream)
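
Each PartialDependenceGraph pairs one feature with the series of values explored for it and the corresponding averaged model outputs. A hedged sketch of how the graphs returned above could be dumped, assuming PartialDependenceGraph exposes getFeature(), getX() and getY() accessors:

// Hedged sketch: print each partial dependence series from the pdps list above.
for (PartialDependenceGraph pdp : pdps) {
    System.out.println("feature: " + pdp.getFeature().getName());
    for (int i = 0; i < pdp.getX().size(); i++) {
        // x value explored for the feature -> average output observed at that value
        System.out.println(pdp.getX().get(i) + " -> " + pdp.getY().get(i));
    }
}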

Example 75 with Feature

Use of org.kie.kogito.explainability.model.Feature in project kogito-apps by kiegroup.

The class PmmlCompoundScorecardLimeExplainerTest, method getModel:

private PredictionProvider getModel() {
    return inputs -> CompletableFuture.supplyAsync(() -> {
        List<PredictionOutput> outputs = new ArrayList<>(inputs.size());
        for (PredictionInput predictionInput : inputs) {
            List<Feature> inputFeatures = predictionInput.getFeatures();
            CompoundNestedPredicateScorecardExecutor pmmlModel = new CompoundNestedPredicateScorecardExecutor(inputFeatures.get(0).getValue().asNumber(), inputFeatures.get(1).getValue().asString());
            PMML4Result result = pmmlModel.execute(compoundScoreCardRuntime);
            Map<String, Object> resultVariables = result.getResultVariables();
            String score = "" + resultVariables.get(CompoundNestedPredicateScorecardExecutor.TARGET_FIELD);
            String reason1 = "" + resultVariables.get(CompoundNestedPredicateScorecardExecutor.REASON_CODE1_FIELD);
            PredictionOutput predictionOutput = new PredictionOutput(List.of(new Output("score", Type.TEXT, new Value(score), 1d), new Output("reason1", Type.TEXT, new Value(reason1), 1d)));
            outputs.add(predictionOutput);
        }
        return outputs;
    });
}
Also used : FeatureFactory(org.kie.kogito.explainability.model.FeatureFactory) PredictionInputsDataDistribution(org.kie.kogito.explainability.model.PredictionInputsDataDistribution) PerturbationContext(org.kie.kogito.explainability.model.PerturbationContext) PMMLRuntime(org.kie.pmml.api.runtime.PMMLRuntime) Feature(org.kie.kogito.explainability.model.Feature) Prediction(org.kie.kogito.explainability.model.Prediction) URISyntaxException(java.net.URISyntaxException) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) AssertionsForClassTypes(org.assertj.core.api.AssertionsForClassTypes) TimeoutException(java.util.concurrent.TimeoutException) Random(java.util.Random) CompletableFuture(java.util.concurrent.CompletableFuture) PMML4Result(org.kie.api.pmml.PMML4Result) Value(org.kie.kogito.explainability.model.Value) DataDistribution(org.kie.kogito.explainability.model.DataDistribution) Saliency(org.kie.kogito.explainability.model.Saliency) PMMLRuntimeFactoryInternal.getPMMLRuntime(org.kie.pmml.evaluator.assembler.factories.PMMLRuntimeFactoryInternal.getPMMLRuntime) ArrayList(java.util.ArrayList) BeforeAll(org.junit.jupiter.api.BeforeAll) Map(java.util.Map) LimeConfig(org.kie.kogito.explainability.local.lime.LimeConfig) PredictionOutput(org.kie.kogito.explainability.model.PredictionOutput) LimeConfigOptimizer(org.kie.kogito.explainability.local.lime.optim.LimeConfigOptimizer) SimplePrediction(org.kie.kogito.explainability.model.SimplePrediction) LimeExplainer(org.kie.kogito.explainability.local.lime.LimeExplainer) DataUtils(org.kie.kogito.explainability.utils.DataUtils) Type(org.kie.kogito.explainability.model.Type) PredictionProvider(org.kie.kogito.explainability.model.PredictionProvider) ExecutionException(java.util.concurrent.ExecutionException) Test(org.junit.jupiter.api.Test) PredictionInput(org.kie.kogito.explainability.model.PredictionInput) List(java.util.List) ExplainabilityMetrics(org.kie.kogito.explainability.utils.ExplainabilityMetrics) Output(org.kie.kogito.explainability.model.Output) ValidationUtils(org.kie.kogito.explainability.utils.ValidationUtils) Config(org.kie.kogito.explainability.Config) Assertions.assertDoesNotThrow(org.junit.jupiter.api.Assertions.assertDoesNotThrow)
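
The provider above expects two features per input, in order: a numerical value and a text value (see inputFeatures.get(0)/get(1)). Below is a hedged sketch of building such an input and scoring it once; the feature names "input1"/"input2" and the values are hypothetical, and getModel() is the method shown above (compoundScoreCardRuntime is initialised elsewhere in the test class):

// Hedged sketch of invoking the compound scorecard provider once.
PredictionProvider model = getModel();
List<Feature> features = new ArrayList<>();
features.add(FeatureFactory.newNumericalFeature("input1", -50));   // hypothetical name/value
features.add(FeatureFactory.newTextFeature("input2", "classB"));   // hypothetical name/value
PredictionInput input = new PredictionInput(features);
List<PredictionOutput> outputs = model.predictAsync(List.of(input))
        .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
// The provider returns two outputs per input: "score" and "reason1".
assertThat(outputs.get(0).getOutputs()).hasSize(2);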

Aggregations

Feature (org.kie.kogito.explainability.model.Feature): 233 usages
PredictionOutput (org.kie.kogito.explainability.model.PredictionOutput): 118 usages
Test (org.junit.jupiter.api.Test): 107 usages
PredictionInput (org.kie.kogito.explainability.model.PredictionInput): 107 usages
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 104 usages
Output (org.kie.kogito.explainability.model.Output): 102 usages
ArrayList (java.util.ArrayList): 97 usages
Random (java.util.Random): 92 usages
PredictionProvider (org.kie.kogito.explainability.model.PredictionProvider): 78 usages
Value (org.kie.kogito.explainability.model.Value): 74 usages
LinkedList (java.util.LinkedList): 72 usages
ValueSource (org.junit.jupiter.params.provider.ValueSource): 71 usages
Prediction (org.kie.kogito.explainability.model.Prediction): 67 usages
List (java.util.List): 51 usages
CounterfactualEntity (org.kie.kogito.explainability.local.counterfactual.entities.CounterfactualEntity): 46 usages
PerturbationContext (org.kie.kogito.explainability.model.PerturbationContext): 42 usages
Type (org.kie.kogito.explainability.model.Type): 39 usages
NumericalFeatureDomain (org.kie.kogito.explainability.model.domain.NumericalFeatureDomain): 37 usages
SimplePrediction (org.kie.kogito.explainability.model.SimplePrediction): 35 usages
FeatureDomain (org.kie.kogito.explainability.model.domain.FeatureDomain): 33 usages