Use of org.kie.kogito.explainability.model.PredictionInput in project kogito-apps by kiegroup.
Class PmmlScorecardCategoricalLimeExplainerTest, method testExplanationWeightedStabilityWithOptimization:
@Test
void testExplanationWeightedStabilityWithOptimization() throws ExecutionException, InterruptedException, TimeoutException {
    PredictionProvider model = getModel();
    List<PredictionInput> samples = getSamples();
    // Predict on a subset of the samples to build the predictions fed to the optimizer.
    List<PredictionOutput> predictionOutputs = model.predictAsync(samples.subList(0, 5)).get();
    List<Prediction> predictions = DataUtils.getPredictions(samples, predictionOutputs);
    // Optimize the LIME hyperparameters for a weighted combination of stability scores.
    long seed = 0;
    LimeConfigOptimizer limeConfigOptimizer = new LimeConfigOptimizer()
            .withDeterministicExecution(true)
            .withWeightedStability(0.4, 0.6);
    Random random = new Random();
    PerturbationContext perturbationContext = new PerturbationContext(seed, random, 1);
    LimeConfig initialConfig = new LimeConfig()
            .withSamples(10)
            .withPerturbationContext(perturbationContext);
    LimeConfig optimizedConfig = limeConfigOptimizer.optimize(initialConfig, predictions, model);
    assertThat(optimizedConfig).isNotSameAs(initialConfig);
    // The optimized configuration should produce stable local saliencies on a fresh input.
    LimeExplainer limeExplainer = new LimeExplainer(optimizedConfig);
    PredictionInput testPredictionInput = getTestInput();
    List<PredictionOutput> testPredictionOutputs = model.predictAsync(List.of(testPredictionInput))
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    Prediction instance = new SimplePrediction(testPredictionInput, testPredictionOutputs.get(0));
    assertDoesNotThrow(() -> ValidationUtils.validateLocalSaliencyStability(model, instance, limeExplainer, 1, 0.5, 0.7));
}
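The test depends on two helpers defined elsewhere in the test class, getSamples() and getTestInput(), which this page does not show. A minimal sketch of what they might look like, assuming the scorecard takes two categorical inputs named "input1" and "input2" (the feature names and category values below are illustrative, not copied from the project):

private List<PredictionInput> getSamples() {
    // Hypothetical sample set: pairs of categorical values for the two scorecard inputs.
    List<PredictionInput> inputs = new ArrayList<>();
    String[] categories = { "classA", "classB", "classC" }; // illustrative values
    for (String first : categories) {
        for (String second : categories) {
            inputs.add(new PredictionInput(List.of(
                    FeatureFactory.newCategoricalFeature("input1", first),
                    FeatureFactory.newCategoricalFeature("input2", second))));
        }
    }
    return inputs;
}

private PredictionInput getTestInput() {
    // Single input used as the explanation target.
    return new PredictionInput(List.of(
            FeatureFactory.newCategoricalFeature("input1", "classA"),
            FeatureFactory.newCategoricalFeature("input2", "classB")));
}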
Use of org.kie.kogito.explainability.model.PredictionInput in project kogito-apps by kiegroup.
Class PmmlScorecardCategoricalLimeExplainerTest, method getModel:
private PredictionProvider getModel() {
    return inputs -> CompletableFuture.supplyAsync(() -> {
        List<PredictionOutput> outputs = new ArrayList<>();
        for (PredictionInput input1 : inputs) {
            List<Feature> features1 = input1.getFeatures();
            // Run the PMML scorecard with the two categorical input values;
            // scorecardCategoricalRuntime is initialized elsewhere in the test class.
            SimpleScorecardCategoricalExecutor pmmlModel = new SimpleScorecardCategoricalExecutor(
                    features1.get(0).getValue().asString(), features1.get(1).getValue().asString());
            PMML4Result result = pmmlModel.execute(scorecardCategoricalRuntime);
            String score = "" + result.getResultVariables().get(SimpleScorecardCategoricalExecutor.TARGET_FIELD);
            String reason1 = "" + result.getResultVariables().get(SimpleScorecardCategoricalExecutor.REASON_CODE1_FIELD);
            String reason2 = "" + result.getResultVariables().get(SimpleScorecardCategoricalExecutor.REASON_CODE2_FIELD);
            // Expose the score and the two reason codes as textual outputs.
            PredictionOutput predictionOutput = new PredictionOutput(List.of(
                    new Output("score", Type.TEXT, new Value(score), 1d),
                    new Output("reason1", Type.TEXT, new Value(reason1), 1d),
                    new Output("reason2", Type.TEXT, new Value(reason2), 1d)));
            outputs.add(predictionOutput);
        }
        return outputs;
    });
}
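PredictionProvider is a functional interface whose single method, predictAsync, returns a CompletableFuture, so any function from inputs to outputs can be wrapped the same way. As a contrast to the PMML-backed provider above, a minimal self-contained sketch (the output name and summing logic are illustrative):

// Hypothetical provider that sums all numeric feature values per input.
PredictionProvider sumModel = inputs -> CompletableFuture.supplyAsync(() -> {
    List<PredictionOutput> outputs = new ArrayList<>();
    for (PredictionInput input : inputs) {
        double sum = input.getFeatures().stream()
                .mapToDouble(f -> f.getValue().asNumber())
                .sum();
        outputs.add(new PredictionOutput(
                List.of(new Output("sum", Type.NUMBER, new Value(sum), 1d))));
    }
    return outputs;
});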
Use of org.kie.kogito.explainability.model.PredictionInput in project kogito-apps by kiegroup.
Class PmmlScorecardCategoricalLimeExplainerTest, method testPMMLScorecardCategorical:
@Test
void testPMMLScorecardCategorical() throws Exception {
    PredictionInput input = getTestInput();
    Random random = new Random();
    LimeConfig limeConfig = new LimeConfig()
            .withSamples(10)
            .withPerturbationContext(new PerturbationContext(0L, random, 1));
    LimeExplainer limeExplainer = new LimeExplainer(limeConfig);
    PredictionProvider model = getModel();
    List<PredictionOutput> predictionOutputs = model.predictAsync(List.of(input))
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    assertThat(predictionOutputs).isNotNull().isNotEmpty();
    PredictionOutput output = predictionOutputs.get(0);
    assertThat(output).isNotNull();
    Prediction prediction = new SimplePrediction(input, output);
    Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model)
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    // Each decision's saliency should yield a positive impact score for its top features.
    for (Saliency saliency : saliencyMap.values()) {
        assertThat(saliency).isNotNull();
        double v = ExplainabilityMetrics.impactScore(model, prediction, saliency.getTopFeatures(2));
        assertThat(v).isGreaterThan(0d);
    }
    assertDoesNotThrow(() -> ValidationUtils.validateLocalSaliencyStability(model, prediction, limeExplainer, 1, 0.4, 0.4));
    // Measure local saliency F1 for the "score" decision over a small data distribution.
    List<PredictionInput> inputs = getSamples();
    DataDistribution distribution = new PredictionInputsDataDistribution(inputs);
    String decision = "score";
    int k = 1;
    int chunkSize = 2;
    double f1 = ExplainabilityMetrics.getLocalSaliencyF1(decision, model, limeExplainer, distribution, k, chunkSize);
    AssertionsForClassTypes.assertThat(f1).isBetween(0d, 1d);
}
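explainAsync returns one Saliency per output name, so the map above holds entries for "score", "reason1" and "reason2". A short sketch of inspecting the ranked feature importances for the "score" decision (the printing is illustrative; the getters come from the Saliency and FeatureImportance API used above):

Saliency scoreSaliency = saliencyMap.get("score");
for (FeatureImportance featureImportance : scoreSaliency.getTopFeatures(2)) {
    // Print each top feature together with its importance score.
    System.out.printf("%s -> %.3f%n",
            featureImportance.getFeature().getName(),
            featureImportance.getScore());
}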
Use of org.kie.kogito.explainability.model.PredictionInput in project kogito-apps by kiegroup.
Class LimeExplainerTest, method testWithDataDistribution:
@Test
void testWithDataDistribution() throws InterruptedException, ExecutionException, TimeoutException {
    Random random = new Random();
    PerturbationContext perturbationContext = new PerturbationContext(4L, random, 1);
    // Build an independent per-feature distribution with four random values per feature.
    List<FeatureDistribution> featureDistributions = new ArrayList<>();
    int nf = 4;
    List<Feature> features = new ArrayList<>();
    for (int i = 0; i < nf; i++) {
        Feature numericalFeature = FeatureFactory.newNumericalFeature("f-" + i, Double.NaN);
        features.add(numericalFeature);
        List<Value> values = new ArrayList<>();
        for (int r = 0; r < 4; r++) {
            values.add(Type.NUMBER.randomValue(perturbationContext));
        }
        featureDistributions.add(new GenericFeatureDistribution(numericalFeature, values));
    }
    DataDistribution dataDistribution = new IndependentFeaturesDataDistribution(featureDistributions);
    LimeConfig limeConfig = new LimeConfig()
            .withDataDistribution(dataDistribution)
            .withPerturbationContext(perturbationContext)
            .withSamples(10);
    LimeExplainer limeExplainer = new LimeExplainer(limeConfig);
    PredictionInput input = new PredictionInput(features);
    PredictionProvider model = TestUtils.getSumThresholdModel(random.nextDouble(), random.nextDouble());
    PredictionOutput output = model.predictAsync(List.of(input))
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit()).get(0);
    Prediction prediction = new SimplePrediction(input, output);
    Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model)
            .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
    assertThat(saliencyMap).isNotNull();
    String decisionName = "inside";
    Saliency saliency = saliencyMap.get(decisionName);
    assertThat(saliency).isNotNull();
}
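With a DataDistribution configured, LIME draws its neighborhood samples from the distribution rather than perturbing the input alone. The distribution can also be sampled by hand to inspect what those synthetic inputs look like; a sketch, assuming DataDistribution exposes a sample(int) method as in explainability-core:

// Hypothetical inspection: draw a few synthetic inputs from the distribution LIME samples from.
List<PredictionInput> drawn = dataDistribution.sample(3);
for (PredictionInput sampledInput : drawn) {
    sampledInput.getFeatures().forEach(f -> System.out.println(f.getName() + " = " + f.getValue()));
}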
Use of org.kie.kogito.explainability.model.PredictionInput in project kogito-apps by kiegroup.
Class LimeExplainerTest, method testSparseBalance:
@ParameterizedTest
@ValueSource(longs = { 0, 1, 2, 3, 4 })
void testSparseBalance(long seed) throws InterruptedException, ExecutionException, TimeoutException {
    for (int nf = 1; nf < 4; nf++) {
        Random random = new Random();
        int noOfSamples = 100;
        // Baseline: LIME without the sparse-balance penalty.
        LimeConfig limeConfigNoPenalty = new LimeConfig()
                .withPerturbationContext(new PerturbationContext(seed, random, DEFAULT_NO_OF_PERTURBATIONS))
                .withSamples(noOfSamples)
                .withPenalizeBalanceSparse(false);
        LimeExplainer limeExplainerNoPenalty = new LimeExplainer(limeConfigNoPenalty);
        List<Feature> features = new ArrayList<>();
        for (int i = 0; i < nf; i++) {
            features.add(TestUtils.getMockedNumericFeature(i));
        }
        PredictionInput input = new PredictionInput(features);
        PredictionProvider model = TestUtils.getSumSkipModel(0);
        PredictionOutput output = model.predictAsync(List.of(input))
                .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit()).get(0);
        Prediction prediction = new SimplePrediction(input, output);
        Map<String, Saliency> saliencyMapNoPenalty = limeExplainerNoPenalty.explainAsync(prediction, model)
                .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
        assertThat(saliencyMapNoPenalty).isNotNull();
        String decisionName = "sum-but0";
        Saliency saliencyNoPenalty = saliencyMapNoPenalty.get(decisionName);
        // Same explanation with the penalty enabled: scores should not grow in magnitude.
        LimeConfig limeConfig = new LimeConfig()
                .withPerturbationContext(new PerturbationContext(seed, random, DEFAULT_NO_OF_PERTURBATIONS))
                .withSamples(noOfSamples)
                .withPenalizeBalanceSparse(true);
        LimeExplainer limeExplainer = new LimeExplainer(limeConfig);
        Map<String, Saliency> saliencyMap = limeExplainer.explainAsync(prediction, model)
                .get(Config.INSTANCE.getAsyncTimeout(), Config.INSTANCE.getAsyncTimeUnit());
        assertThat(saliencyMap).isNotNull();
        Saliency saliency = saliencyMap.get(decisionName);
        for (int i = 0; i < features.size(); i++) {
            double score = saliency.getPerFeatureImportance().get(i).getScore();
            double scoreNoPenalty = saliencyNoPenalty.getPerFeatureImportance().get(i).getScore();
            assertThat(Math.abs(score)).isLessThanOrEqualTo(Math.abs(scoreNoPenalty));
        }
    }
}
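TestUtils.getSumSkipModel(0) is a deterministic toy model: it sums every feature except the one at the given index and names the output accordingly (hence the "sum-but0" decision above). A sketch of an equivalent provider, reconstructed from that behavior rather than copied from TestUtils:

// Hypothetical reconstruction of a sum-skip model.
static PredictionProvider sumSkipModel(int skipIndex) {
    return inputs -> CompletableFuture.supplyAsync(() -> {
        List<PredictionOutput> outputs = new ArrayList<>();
        for (PredictionInput input : inputs) {
            List<Feature> features = input.getFeatures();
            double sum = 0;
            for (int i = 0; i < features.size(); i++) {
                if (i != skipIndex) {
                    // Sum all feature values except the skipped one.
                    sum += features.get(i).getValue().asNumber();
                }
            }
            outputs.add(new PredictionOutput(List.of(
                    new Output("sum-but" + skipIndex, Type.NUMBER, new Value(sum), 1d))));
        }
        return outputs;
    });
}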